diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1696,7 +1696,6 @@
 }
 
 // 12.13. Vector Single-Width Integer Multiply-Add Instructions
-let HasPolicy = false in {
 defm vmacc : RVVIntTerBuiltinSet;
 defm vnmsac : RVVIntTerBuiltinSet;
 defm vmadd : RVVIntTerBuiltinSet;
@@ -1717,7 +1716,6 @@
 defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
                                        [["vx", "w", "wwUev"]]>;
 }
-}
 
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
@@ -1798,7 +1796,6 @@
 }
 
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
-let HasPolicy = false in {
 defm vfmacc : RVVFloatingTerBuiltinSet;
 defm vfnmacc : RVVFloatingTerBuiltinSet;
 defm vfmsac : RVVFloatingTerBuiltinSet;
@@ -1813,7 +1810,6 @@
 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
-}
 
 // 14.8. Vector Floating-Point Square-Root Instruction
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
@@ -1997,12 +1993,10 @@
 }
 
 // 17.3. Vector Slide Instructions
-let HasPolicy = false in {
 // 17.3.1. Vector Slideup Instructions
 defm vslideup : RVVSlideBuiltinSet;
 // 17.3.2. Vector Slidedown Instructions
 defm vslidedown : RVVSlideBuiltinSet;
-}
 
 // 17.3.3. Vector Slide1up Instructions
 defm vslide1up : RVVSlideOneBuiltinSet;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
@@ -187,7 +187,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -208,7 +208,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -219,7 +219,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -282,7 +282,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -292,7 +292,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: 
@test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -219,7 +219,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], 
float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -282,7 +282,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -292,7 +292,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -219,7 +219,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -282,7 +282,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -292,7 +292,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -334,7 +334,7 @@ // 
CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -219,7 +219,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -282,7 +282,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -292,7 +292,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t 
mask, vfloat64m4_t acc, @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, 
vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -251,7 +251,7 
@@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // 
CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: 
@test_vfwnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ 
-1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: 
@test_vmacc_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { 
@@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t 
mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, 
size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ 
-1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsac_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, 
vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t 
test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, 
vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t 
op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -559,7 +559,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -570,7 +570,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -580,7 +580,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -590,7 +590,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -610,7 +610,7 @@ // 
CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -632,7 +632,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -643,7 +643,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -665,7 +665,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -687,7 +687,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -698,7 +698,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -731,7 +731,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -742,7 +742,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -764,7 +764,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -775,7 +775,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -786,7 +786,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -797,7 +797,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -808,7 +808,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -818,7 +818,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -838,7 +838,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -881,7 +881,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -892,7 +892,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t 
dst, @@ -903,7 +903,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -914,7 +914,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -925,7 +925,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -947,7 +947,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -958,7 +958,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -980,7 +980,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -991,7 +991,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -1002,7 +1002,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -1013,7 +1013,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1024,7 +1024,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1046,7 +1046,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1057,7 +1057,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1079,7 +1079,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1090,7 +1090,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -577,7 +577,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -587,7 +587,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -597,7 +597,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( 
[[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -669,7 +669,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -680,7 +680,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -710,7 +710,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -740,7 +740,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -750,7 +750,7 
@@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -760,7 +760,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -793,7 +793,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -803,7 +803,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -813,7 +813,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -823,7 +823,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -833,7 +833,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -844,7 +844,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -866,7 +866,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -899,7 +899,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -910,7 +910,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -921,7 +921,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -932,7 +932,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -943,7 +943,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -965,7 +965,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -976,7 +976,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -998,7 +998,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1009,7 +1009,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1020,7 +1020,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1042,7 +1042,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1064,7 +1064,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1075,7 +1075,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c @@ -1056,7 +1056,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1066,7 +1066,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1, @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1, @@ -1096,7 +1096,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -1106,7 +1106,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1, @@ -1126,7 +1126,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -1136,7 +1136,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( 
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
@@ -1146,7 +1146,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
@@ -1156,7 +1156,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
@@ -1166,7 +1166,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
@@ -1176,7 +1176,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1187,7 +1187,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1197,7 +1197,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -1207,7 +1207,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
@@ -1217,7 +1217,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -1227,7 +1227,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
@@ -1237,7 +1237,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
@@ -1247,7 +1247,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
@@ -1257,7 +1257,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
@@ -1267,7 +1267,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
@@ -1277,7 +1277,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -1287,7 +1287,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
@@ -1297,7 +1297,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -1307,7 +1307,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
@@ -1317,7 +1317,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -1327,7 +1327,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -1337,7 +1337,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1, @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -1357,7 +1357,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -1378,7 +1378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -1389,7 +1389,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -1399,7 +1399,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -1420,7 +1420,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, @@ -1430,7 +1430,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1, @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, @@ -1450,7 +1450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1, @@ -1460,7 +1460,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, @@ -1470,7 +1470,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1, @@ -1480,7 +1480,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -1502,7 +1502,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -1513,7 +1513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -1523,7 +1523,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -1534,7 +1534,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], 
i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -1544,7 +1544,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, @@ -1555,7 +1555,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, @@ -1565,7 +1565,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, @@ -1576,7 +1576,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, @@ -1586,7 +1586,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, @@ -1597,7 +1597,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, @@ -1607,7 +1607,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, @@ -1628,7 +1628,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, @@ -1639,7 +1639,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, @@ -1649,7 +1649,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, @@ -1660,7 +1660,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, @@ -1670,7 +1670,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1681,7 +1681,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1691,7 +1691,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1702,7 +1702,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1712,7 +1712,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -1723,7 +1723,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -1733,7 +1733,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, @@ -1743,7 +1743,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -1753,7 +1753,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, @@ -1763,7 +1763,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, @@ -1773,7 +1773,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, @@ -1783,7 +1783,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, @@ -1793,7 +1793,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1804,7 +1804,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1815,7 +1815,7 
@@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -1826,7 +1826,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, @@ -1836,7 +1836,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -1847,7 +1847,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, @@ -1857,7 +1857,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, @@ -1868,7 +1868,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, @@ -1878,7 +1878,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -1910,7 +1910,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, @@ -1920,7 +1920,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -1931,7 +1931,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, @@ -1941,7 +1941,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -1952,7 +1952,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -1962,7 +1962,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, @@ -1973,7 +1973,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -1983,7 +1983,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1993,7 +1993,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -2003,7 +2003,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1, @@ -2013,7 +2013,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1, @@ -2023,7 +2023,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1, @@ -2033,7 +2033,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1, @@ -2043,7 +2043,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -2054,7 +2054,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -2064,7 +2064,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -2074,7 +2074,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1, @@ -2084,7 +2084,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccus_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1, @@ -2094,7 +2094,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -2104,7 +2104,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -2114,7 +2114,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -2124,7 +2124,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // 
CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t 
test_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -272,7 +272,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, 
vfloat32m8_t acc, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -221,7 +221,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -263,7 +263,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -284,7 +284,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -221,7 +221,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -263,7 +263,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -284,7 +284,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -221,7 +221,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -263,7 +263,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -284,7 +284,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_m (vbool2_t mask, 
vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -221,7 +221,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -263,7 +263,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -284,7 +284,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_m 
(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -343,7 +343,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, 
_Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -343,7 +343,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -343,7 +343,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -109,7 +109,7 @@ // 
CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -262,7 +262,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -271,7 +271,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -325,7 +325,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -334,7 +334,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -343,7 +343,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t 
op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t 
op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], 
i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t 
mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t 
op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ 
-843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t 
acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, 
vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, 
size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -906,7 +906,7 @@ 
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, 
vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, 
size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -560,7 +560,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -571,7 +571,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -581,7 +581,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -601,7 +601,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -622,7 +622,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -633,7 +633,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -644,7 +644,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -655,7 +655,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -688,7 +688,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -710,7 +710,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -732,7 +732,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -743,7 +743,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -754,7 +754,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -787,7 +787,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -809,7 +809,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -829,7 +829,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -839,7 +839,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -849,7 +849,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -860,7 +860,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -871,7 +871,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -893,7 +893,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, @@ -904,7 +904,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -926,7 +926,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -937,7 +937,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -948,7 +948,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, 
vuint32m8_t dst, @@ -970,7 +970,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -992,7 +992,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -1003,7 +1003,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1025,7 +1025,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1036,7 +1036,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1047,7 +1047,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1069,7 +1069,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1091,7 +1091,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, @@ -1102,7 +1102,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( 
[[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c @@ 
-538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -568,7 +568,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -578,7 +578,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -588,7 +588,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -598,7 +598,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -608,7 +608,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -650,7 +650,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -660,7 +660,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -670,7 +670,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -691,7 +691,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -701,7 +701,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -731,7 +731,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -741,7 +741,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -751,7 +751,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -761,7 +761,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -772,7 +772,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -794,7 +794,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -804,7 +804,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -814,7 +814,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -824,7 +824,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -845,7 +845,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -856,7 +856,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -867,7 +867,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -878,7 +878,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, @@ -889,7 +889,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -911,7 +911,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -922,7 +922,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -944,7 +944,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, @@ -955,7 +955,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -966,7 +966,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -977,7 +977,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -988,7 +988,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1010,7 +1010,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1021,7 +1021,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1043,7 +1043,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1054,7 +1054,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1065,7 +1065,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, @@ -1087,7 +1087,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f16.i64( [[DEST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c @@ -1056,7 +1056,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1066,7 +1066,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1, @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1, @@ -1096,7 +1096,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -1106,7 +1106,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1, @@ -1126,7 +1126,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -1136,7 +1136,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1, @@ -1146,7 +1146,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, @@ -1156,7 +1156,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1, @@ -1166,7 +1166,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1187,7 +1187,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -1207,7 +1207,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -1227,7 +1227,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, @@ -1237,7 +1237,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1, @@ -1247,7 +1247,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t 
op1, @@ -1267,7 +1267,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, @@ -1277,7 +1277,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, @@ -1297,7 +1297,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -1307,7 +1307,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, @@ -1317,7 +1317,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -1327,7 +1327,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -1337,7 +1337,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1, @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -1357,7 +1357,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -1378,7 +1378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -1389,7 +1389,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -1399,7 +1399,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -1420,7 +1420,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, @@ -1430,7 +1430,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1, @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, @@ -1450,7 +1450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1, @@ -1460,7 +1460,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, @@ -1470,7 +1470,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1, @@ -1480,7 +1480,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -1502,7 +1502,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -1513,7 +1513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -1523,7 +1523,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -1534,7 +1534,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -1544,7 +1544,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, @@ -1555,7 +1555,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, @@ -1565,7 +1565,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, @@ -1576,7 +1576,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, @@ -1586,7 +1586,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, @@ -1597,7 +1597,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, @@ -1607,7 +1607,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, @@ -1628,7 +1628,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, @@ -1639,7 +1639,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, @@ -1649,7 +1649,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, @@ -1660,7 +1660,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, @@ -1670,7 +1670,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1681,7 +1681,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1691,7 +1691,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1702,7 +1702,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1712,7 +1712,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -1723,7 +1723,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -1733,7 +1733,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, @@ -1743,7 +1743,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -1753,7 +1753,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, @@ -1763,7 +1763,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, @@ -1773,7 +1773,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, @@ -1783,7 +1783,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, @@ -1793,7 +1793,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1804,7 +1804,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1815,7 +1815,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -1826,7 +1826,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, @@ -1836,7 +1836,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -1847,7 +1847,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, @@ -1857,7 +1857,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, @@ -1868,7 +1868,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, @@ -1878,7 +1878,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -1910,7 +1910,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, @@ -1920,7 +1920,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -1931,7 +1931,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, @@ -1941,7 +1941,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -1952,7 +1952,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -1962,7 +1962,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, @@ -1973,7 +1973,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -1983,7 +1983,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1993,7 +1993,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -2003,7 +2003,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1, @@ -2013,7 +2013,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccus_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1, @@ -2023,7 +2023,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1, @@ -2033,7 +2033,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1, @@ -2043,7 +2043,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -2054,7 +2054,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -2064,7 +2064,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -2074,7 +2074,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1, @@ -2084,7 +2084,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1, @@ -2094,7 +2094,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -2104,7 +2104,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -2114,7 +2114,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -2124,7 +2124,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1, diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -621,8 +621,9 @@ class RISCVTernaryAAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + LLVMMatchType<1>, LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } class 
RISCVTernaryAAXANoMask @@ -633,11 +634,13 @@ let SplatOperand = 1; let VLOperand = 3; } + // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy class RISCVTernaryAAXAMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_anyint_ty, LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; let VLOperand = 4; } @@ -649,11 +652,13 @@ let SplatOperand = 1; let VLOperand = 3; } + // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy class RISCVTernaryWideMask : Intrinsic< [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_anyint_ty, LLVMMatchType<3>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; let VLOperand = 4; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -994,10 +994,10 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VPseudoBinaryMaskTA : +class VPseudoBinaryMaskPolicy : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -1546,8 +1546,8 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoBinaryNoMask; - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy; } } @@ -1574,8 +1574,8 @@ let VLMul = lmul.value in { def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask; - def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA; + def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy; } } @@ -2183,6 +2183,17 @@ } } +multiclass VPseudoTernaryNoMaskNoPolicy { + let VLMul = MInfo.value in { + def "_" # MInfo.MX : VPseudoTernaryNoMask; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy; + } +} + multiclass VPseudoTernaryWithPolicy; - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy; } } @@ -2206,7 +2217,7 @@ multiclass VPseudoTernaryV_VX { foreach m = MxList in - defm _VX : VPseudoTernary; + defm _VX : VPseudoTernaryNoMaskNoPolicy; } multiclass VPseudoTernaryV_VX_AAXA { @@ -2247,7 +2258,7 @@ multiclass VPseudoTernaryV_VI { foreach m = MxList in - defm _VI : VPseudoTernary; + defm _VI : VPseudoTernaryNoMaskNoPolicy; } multiclass VPseudoVMAC_VV_VX_AAXA { @@ -2328,7 +2339,7 @@ multiclass VPseudoVRED_VS { foreach m = MxList in { defm _VS : VPseudoTernary, - Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV, ReadVIRedV, ReadVMask]>; + Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVMask]>; } } @@ -2863,6 +2874,31 @@ (mask_type V0), GPR:$vl, sew)>; +class VPatTernaryMaskPolicy : + Pat<(result_type (!cast(intrinsic#"_mask") + (result_type result_reg_class:$rs3), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_"#kind#"_"#vlmul.MX # "_MASK") + result_reg_class:$rs3, + (op1_type op1_reg_class:$rs1), + op2_kind:$rs2, + (mask_type V0), + GPR:$vl, sew, (XLenVT timm:$policy))>; + multiclass VPatUnaryS_M { @@ -3434,6 +3470,26 @@ op2_kind>; } +multiclass VPatTernaryNoMaskNoPolicy { + def : VPatTernaryNoMask; + def : 
VPatTernaryMaskPolicy; +} + multiclass VPatTernaryWithPolicy; - def : VPatTernaryMask; + def : VPatTernaryMaskPolicy; } multiclass VPatTernaryV_VV_AAXA vtilist> { foreach vti = vtilist in - defm : VPatTernary; + defm : VPatTernaryNoMaskNoPolicy; } multiclass VPatTernaryV_VX_AAXA vtilist, Operand Imm_type> { foreach vti = vtilist in - defm : VPatTernary; + defm : VPatTernaryNoMaskNoPolicy; } multiclass VPatTernaryW_VV, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git 
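The test updates above all follow one pattern: each masked ternary declaration gains a trailing XLen-typed policy operand, and each call site passes an immediate 0, i.e. the tail-undisturbed / mask-undisturbed setting that matches the previous behaviour. As a reconstructed sketch of the first vfmacc-rv32.ll case (the scalable vector types are elided in the hunks above; <vscale x 1 x half> and <vscale x 1 x i1> are inferred from the nxv1f16 mangling and are written out here as an assumption, not copied from the patch):

declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i32(
  <vscale x 1 x half>,   ; accumulator (also the merge value for masked-off lanes)
  <vscale x 1 x half>,   ; op1
  <vscale x 1 x half>,   ; op2
  <vscale x 1 x i1>,     ; mask
  i32,                   ; vl
  i32)                   ; policy, must be an immediate (ImmArg)

define <vscale x 1 x half> @vfmacc_mask_example(<vscale x 1 x half> %acc,
                                                <vscale x 1 x half> %op1,
                                                <vscale x 1 x half> %op2,
                                                <vscale x 1 x i1> %mask,
                                                i32 %vl) nounwind {
  ; policy 0 keeps the existing tail-undisturbed, mask-undisturbed behaviour
  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i32(
              <vscale x 1 x half> %acc,
              <vscale x 1 x half> %op1,
              <vscale x 1 x half> %op2,
              <vscale x 1 x i1> %mask,
              i32 %vl, i32 0)
  ret <vscale x 1 x half> %a
}

The rv64 variants below are identical except that vl and the policy operand are i64.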
a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define 
@intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll @@ -28,7 +28,7 @@ , 
, , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ 
half, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) 
ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 
%4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, 
%3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 
+692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, 
i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define 
@intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); 
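Worth noting while reading these hunks: only the .mask intrinsics change. The unmasked ternary intrinsics keep their old signature, which is what the VPseudoTernaryNoMaskNoPolicy / VPatTernaryNoMaskNoPolicy split earlier in the patch encodes (a policy-free unmasked pseudo paired with a policy-carrying masked pseudo). Side by side for the nxv1f16 vfmsub case on RV64, again with the elided scalable types written out as an assumption:

; unmasked form: untouched by this patch (no policy operand)
declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64(
  <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64)

; masked form: grows the trailing immediate policy operand
declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64(
  <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>,
  <vscale x 1 x i1>, i64, i64)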
define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 
+821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) 
ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: 
@@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define 
@intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + 
i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, 
- i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ 
%1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, 
i32); define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, 
i64); define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ 
float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ 
%1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1017,7 +1017,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: @@ -1036,7 +1036,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1073,7 +1073,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: @@ -1092,7 +1092,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1129,7 +1129,7 @@ double, , , - i32); + i32, i32); define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: @@ -1148,7 +1148,7 @@ double %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -581,7 +581,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: @@ -596,7 +596,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -629,7 +629,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: @@ -644,7 +644,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -677,7 +677,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: @@ -692,7 +692,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -725,7 +725,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: @@ -740,7 +740,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -773,7 +773,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: @@ -788,7 +788,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -821,7 +821,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: @@ -836,7 +836,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -869,7 +869,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: @@ -884,7 +884,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -917,7 +917,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: @@ -932,7 +932,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -965,7 +965,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: @@ -980,7 +980,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1013,7 +1013,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: @@ -1028,7 +1028,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1061,7 +1061,7 @@ double, , , - 
i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: @@ -1076,7 +1076,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1109,7 +1109,7 @@ double, , , - i64); + i64, i64); define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: @@ -1124,7 +1124,7 @@ double %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i32); + i32, i32); 
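; The widening multiply-add tests follow the same pattern: the masked vfwmacc declaration
; picks up the extra policy operand after vl. A hedged sketch for the vf/nxv2f32 case
; defined directly below (vector types reconstructed from the mangled name, so an
; assumption rather than a copy of the patch):
;
;   declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
;     <vscale x 2 x float>, half, <vscale x 2 x half>, <vscale x 2 x i1>,
;     i32, i32)  ; ..., vl, policy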
define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); 
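; On RV64 the shape is identical, only the XLEN type changes: the new trailing operand is
; an i64 and the tests still pass 0. A hedged call sketch for the vv/nxv16f32 definition
; that follows (vector types inferred from the mangled name, assumed rather than quoted):
;
;   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
;     <vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2,
;     <vscale x 16 x i1> %3, i64 %4, i64 0)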
define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, 
%3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) 
ret %a } @@ -539,7 +539,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, 
i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -587,7 +587,7 
@@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i32); + i32, i32); define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) 
ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -443,7 +443,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: @@ -458,7 +458,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -491,7 +491,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: @@ -506,7 +506,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -539,7 +539,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: @@ -554,7 +554,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -587,7 +587,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: @@ -602,7 +602,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -635,7 +635,7 @@ half, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: @@ -650,7 +650,7 @@ half %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -683,7 +683,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: @@ -698,7 +698,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -731,7 +731,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: @@ -746,7 +746,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -779,7 +779,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: @@ -794,7 +794,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -827,7 +827,7 @@ float, , , - i64); + i64, i64); define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: @@ -842,7 +842,7 @@ float %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -580,7 +580,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -626,7 
+626,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -672,7 +672,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -718,7 +718,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -764,7 +764,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -810,7 +810,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i32); + i32, 
i32); define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1553,7 +1553,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: @@ -1574,7 +1574,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1613,7 +1613,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: @@ -1634,7 +1634,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1673,7 +1673,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: @@ -1694,7 +1694,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define 
@intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -580,7 +580,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -626,7 +626,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -672,7 +672,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -718,7 +718,7 @@ , , , - i64); + i64, i64); define 
@intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -764,7 +764,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -810,7 +810,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i64); + i64, i64); define 
@intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1546,7 +1546,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: @@ -1560,7 +1560,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1592,7 +1592,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: @@ -1606,7 +1606,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1638,7 +1638,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: @@ -1652,7 +1652,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { 
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -580,7 +580,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -626,7 +626,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -672,7 +672,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -718,7 +718,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -764,7 +764,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -810,7 +810,7 @@ , , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1553,7 +1553,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: @@ -1574,7 +1574,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1613,7 +1613,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: @@ -1634,7 +1634,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1673,7 +1673,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: @@ -1694,7 +1694,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 
%4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -580,7 +580,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -626,7 +626,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -672,7 +672,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -718,7 +718,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -764,7 +764,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -810,7 +810,7 @@ , , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ 
-948,7 +948,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i64 %4) 
+ i64 %4, i64 0) ret %a } @@ -1546,7 +1546,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: @@ -1560,7 +1560,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1592,7 +1592,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: @@ -1606,7 +1606,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1638,7 +1638,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: @@ -1652,7 +1652,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define 
@intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -580,7 +580,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -626,7 +626,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -672,7 +672,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -718,7 +718,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -764,7 +764,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -810,7 +810,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i32); + i32, i32); define 
@intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1553,7 +1553,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: @@ -1574,7 +1574,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1613,7 +1613,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: @@ -1634,7 +1634,7 @@ i64 %1, %2, %3, - i32 %4) + 
i32 %4, i32 0) ret %a } @@ -1673,7 +1673,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: @@ -1694,7 +1694,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define 
@intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -580,7 +580,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -626,7 +626,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -672,7 +672,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -718,7 +718,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -764,7 +764,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -810,7 +810,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i64); + i64, i64); define 
@intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1546,7 +1546,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: @@ -1560,7 +1560,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1592,7 +1592,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: @@ -1606,7 +1606,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1638,7 +1638,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: @@ -1652,7 +1652,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); 
define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -580,7 +580,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -626,7 +626,7 @@ , , , - i32); + i32, i32); define 
@intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -672,7 +672,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -718,7 +718,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -764,7 +764,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -810,7 +810,7 @@ , , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i32); + i32, i32); define 
@intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1553,7 +1553,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: @@ -1574,7 +1574,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1613,7 +1613,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: @@ -1634,7 +1634,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1673,7 +1673,7 @@ i64, , , - i32); + i32, i32); define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: @@ -1694,7 +1694,7 @@ i64 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define 
@intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -580,7 +580,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -626,7 +626,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -672,7 +672,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -718,7 +718,7 @@ , , , - i64); + i64, i64); define 
@intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: @@ -732,7 +732,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -764,7 +764,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: @@ -778,7 +778,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -810,7 +810,7 @@ , , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: @@ -824,7 +824,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -994,7 +994,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8: @@ -1008,7 +1008,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1040,7 +1040,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8: @@ -1054,7 +1054,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1086,7 +1086,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8: @@ -1100,7 +1100,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1224,7 +1224,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16: @@ -1238,7 +1238,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1270,7 +1270,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16: @@ -1284,7 +1284,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1316,7 +1316,7 @@ i16, , , - i64); + i64, 
i64); define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16: @@ -1330,7 +1330,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1408,7 +1408,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32: @@ -1422,7 +1422,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1454,7 +1454,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32: @@ -1468,7 +1468,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1500,7 +1500,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32: @@ -1514,7 +1514,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1546,7 +1546,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: @@ -1560,7 +1560,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1592,7 +1592,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: @@ -1606,7 +1606,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1638,7 +1638,7 @@ i64, , , - i64); + i64, i64); define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: @@ -1652,7 +1652,7 @@ i64 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll @@ -28,7 +28,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -75,7 +75,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -107,7 +107,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8: @@ -121,7 +121,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -154,7 +154,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -186,7 +186,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8: @@ -200,7 +200,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -233,7 +233,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -265,7 +265,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8: @@ -279,7 +279,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -312,7 +312,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -344,7 +344,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8: @@ -358,7 +358,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -391,7 +391,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -423,7 +423,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8: @@ -437,7 +437,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -470,7 +470,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -502,7 +502,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16: @@ -516,7 +516,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -549,7 +549,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -581,7 +581,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16: @@ -595,7 +595,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -628,7 +628,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -660,7 +660,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16: @@ -674,7 +674,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -707,7 +707,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -739,7 +739,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16: @@ -753,7 +753,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -786,7 +786,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -818,7 +818,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16: @@ -832,7 +832,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -865,7 +865,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -897,7 +897,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32: @@ -911,7 +911,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -944,7 +944,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -976,7 +976,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32: @@ -990,7 +990,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1023,7 +1023,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1055,7 +1055,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32: @@ -1069,7 +1069,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1102,7 +1102,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1134,7 +1134,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32: @@ -1148,7 +1148,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1181,7 +1181,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1213,7 +1213,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64: @@ -1227,7 +1227,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1260,7 +1260,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1292,7 +1292,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64: @@ -1306,7 +1306,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1339,7 +1339,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1371,7 +1371,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64: @@ -1385,7 +1385,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1418,7 +1418,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1450,7 +1450,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16: @@ -1464,7 +1464,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1497,7 +1497,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1529,7 +1529,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16: @@ -1543,7 +1543,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1576,7 +1576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1608,7 +1608,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16: @@ -1622,7 +1622,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1655,7 +1655,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1687,7 +1687,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16: @@ -1701,7 +1701,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1734,7 +1734,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1766,7 +1766,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16: @@ -1780,7 +1780,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1813,7 +1813,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1845,7 +1845,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32: @@ -1859,7 +1859,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1892,7 +1892,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1924,7 +1924,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32: @@ -1938,7 +1938,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1971,7 +1971,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2003,7 +2003,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32: @@ -2017,7 +2017,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2050,7 +2050,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2082,7 +2082,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32: @@ -2096,7 +2096,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2129,7 +2129,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2161,7 +2161,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64: @@ -2175,7 +2175,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2208,7 +2208,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2240,7 +2240,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64: @@ -2254,7 +2254,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2287,7 +2287,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2319,7 +2319,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64: @@ -2333,7 +2333,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2366,7 +2366,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll @@ -28,7 +28,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -75,7 +75,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -107,7 +107,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8: @@ -121,7 +121,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -154,7 +154,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -186,7 +186,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8: @@ -200,7 +200,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -233,7 +233,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ 
-265,7 +265,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8: @@ -279,7 +279,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -312,7 +312,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -344,7 +344,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8: @@ -358,7 +358,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -391,7 +391,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -423,7 +423,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8: @@ -437,7 +437,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -470,7 +470,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -502,7 +502,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16: @@ -516,7 +516,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -549,7 +549,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -581,7 +581,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16: @@ -595,7 +595,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -628,7 +628,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -660,7 +660,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16: @@ -674,7 +674,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -707,7 +707,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -739,7 +739,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16: @@ -753,7 +753,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -786,7 +786,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -818,7 +818,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16: @@ -832,7 +832,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -865,7 +865,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -897,7 +897,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32: @@ -911,7 +911,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -944,7 +944,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -976,7 +976,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32: @@ -990,7 +990,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1023,7 +1023,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1055,7 +1055,7 @@ , i64, , - i64); + i64, i64); define 
@intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32: @@ -1069,7 +1069,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1102,7 +1102,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1134,7 +1134,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32: @@ -1148,7 +1148,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1181,7 +1181,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1213,7 +1213,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64: @@ -1227,7 +1227,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1260,7 +1260,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1292,7 +1292,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64: @@ -1306,7 +1306,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1339,7 +1339,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1371,7 +1371,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64: @@ -1385,7 +1385,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1418,7 +1418,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1450,7 +1450,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16: @@ -1464,7 +1464,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1497,7 +1497,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1529,7 +1529,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16: @@ -1543,7 +1543,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1576,7 +1576,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1608,7 +1608,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16: @@ -1622,7 +1622,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1655,7 +1655,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1687,7 +1687,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16: @@ -1701,7 +1701,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1734,7 +1734,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1766,7 +1766,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16: @@ -1780,7 +1780,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1813,7 +1813,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1845,7 +1845,7 @@ , i64, , - i64); + i64, i64); define 
@intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32: @@ -1859,7 +1859,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1892,7 +1892,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1924,7 +1924,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32: @@ -1938,7 +1938,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1971,7 +1971,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2003,7 +2003,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32: @@ -2017,7 +2017,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2050,7 +2050,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2082,7 +2082,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32: @@ -2096,7 +2096,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2129,7 +2129,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2161,7 +2161,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64: @@ -2175,7 +2175,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2208,7 +2208,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2240,7 +2240,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64: @@ -2254,7 +2254,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2287,7 +2287,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2319,7 +2319,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64: @@ -2333,7 +2333,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2366,7 +2366,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll @@ -28,7 +28,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -75,7 +75,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -107,7 +107,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8: @@ -121,7 +121,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -154,7 +154,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -186,7 +186,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8: @@ -200,7 +200,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret 
%a } @@ -233,7 +233,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -265,7 +265,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8: @@ -279,7 +279,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -312,7 +312,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -344,7 +344,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8: @@ -358,7 +358,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -391,7 +391,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -423,7 +423,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8: @@ -437,7 +437,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -470,7 +470,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -502,7 +502,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16: @@ -516,7 +516,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -549,7 +549,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -581,7 +581,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16: @@ -595,7 +595,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -628,7 +628,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -660,7 +660,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16: @@ -674,7 +674,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -707,7 +707,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -739,7 +739,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16: @@ -753,7 +753,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -786,7 +786,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -818,7 +818,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16: @@ -832,7 +832,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -865,7 +865,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -897,7 +897,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32: @@ -911,7 +911,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -944,7 +944,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -976,7 +976,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32: @@ -990,7 +990,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1023,7 +1023,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1055,7 +1055,7 @@ , i32, , - i32); + i32, 
i32); define @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32: @@ -1069,7 +1069,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1102,7 +1102,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1134,7 +1134,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32: @@ -1148,7 +1148,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1181,7 +1181,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1213,7 +1213,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64: @@ -1227,7 +1227,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1260,7 +1260,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1292,7 +1292,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64: @@ -1306,7 +1306,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1339,7 +1339,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1371,7 +1371,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64: @@ -1385,7 +1385,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1418,7 +1418,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1450,7 +1450,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16: @@ -1464,7 +1464,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1497,7 +1497,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1529,7 +1529,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16: @@ -1543,7 +1543,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1576,7 +1576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1608,7 +1608,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16: @@ -1622,7 +1622,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1655,7 +1655,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1687,7 +1687,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16: @@ -1701,7 +1701,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1734,7 +1734,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1766,7 +1766,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16: @@ -1780,7 +1780,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1813,7 +1813,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1845,7 +1845,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32( 
%0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32: @@ -1859,7 +1859,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1892,7 +1892,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -1924,7 +1924,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32: @@ -1938,7 +1938,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1971,7 +1971,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2003,7 +2003,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32: @@ -2017,7 +2017,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2050,7 +2050,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2082,7 +2082,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32: @@ -2096,7 +2096,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2129,7 +2129,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2161,7 +2161,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64: @@ -2175,7 +2175,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2208,7 +2208,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2240,7 +2240,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64: @@ -2254,7 +2254,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2287,7 +2287,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } @@ -2319,7 +2319,7 @@ , i32, , - i32); + i32, i32); define @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64: @@ -2333,7 +2333,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -2366,7 +2366,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll @@ -28,7 +28,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -75,7 +75,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -107,7 +107,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8: @@ -121,7 +121,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -154,7 +154,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -186,7 +186,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8: @@ -200,7 +200,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -233,7 +233,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a 
} @@ -265,7 +265,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8: @@ -279,7 +279,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -312,7 +312,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -344,7 +344,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8: @@ -358,7 +358,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -391,7 +391,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -423,7 +423,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8: @@ -437,7 +437,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -470,7 +470,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -502,7 +502,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16: @@ -516,7 +516,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -549,7 +549,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -581,7 +581,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16: @@ -595,7 +595,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -628,7 +628,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -660,7 +660,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16: @@ -674,7 +674,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -707,7 +707,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -739,7 +739,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16: @@ -753,7 +753,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -786,7 +786,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -818,7 +818,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16: @@ -832,7 +832,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -865,7 +865,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -897,7 +897,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32: @@ -911,7 +911,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -944,7 +944,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -976,7 +976,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32: @@ -990,7 +990,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1023,7 +1023,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1055,7 +1055,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32: @@ -1069,7 +1069,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1102,7 +1102,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1134,7 +1134,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32: @@ -1148,7 +1148,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1181,7 +1181,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1213,7 +1213,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64: @@ -1227,7 +1227,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1260,7 +1260,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1292,7 +1292,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64: @@ -1306,7 +1306,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1339,7 +1339,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1371,7 +1371,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64: @@ -1385,7 +1385,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1418,7 +1418,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1450,7 +1450,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16: @@ -1464,7 +1464,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1497,7 +1497,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1529,7 +1529,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16: @@ -1543,7 +1543,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1576,7 +1576,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1608,7 +1608,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16: @@ -1622,7 +1622,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1655,7 +1655,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1687,7 +1687,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16: @@ -1701,7 +1701,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1734,7 +1734,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1766,7 +1766,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16: @@ -1780,7 +1780,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1813,7 +1813,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1845,7 +1845,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32: @@ -1859,7 +1859,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1892,7 +1892,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -1924,7 +1924,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32: @@ -1938,7 +1938,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1971,7 +1971,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2003,7 +2003,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32: @@ -2017,7 +2017,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2050,7 +2050,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2082,7 +2082,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32: @@ -2096,7 +2096,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2129,7 +2129,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2161,7 +2161,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64: @@ -2175,7 +2175,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2208,7 +2208,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2240,7 +2240,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64: @@ -2254,7 +2254,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2287,7 +2287,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } @@ -2319,7 +2319,7 @@ , i64, , - i64); + i64, i64); define @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64: @@ -2333,7 +2333,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -2366,7 +2366,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ 
-212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -580,7 +580,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -626,7 +626,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -672,7 +672,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -718,7 +718,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: @@ -732,7 +732,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -764,7 +764,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: @@ -778,7 +778,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ 
-810,7 +810,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: @@ -824,7 +824,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -994,7 +994,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: @@ -1008,7 +1008,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1040,7 +1040,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: @@ -1054,7 +1054,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1086,7 +1086,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: @@ -1100,7 +1100,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1224,7 +1224,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32: @@ -1238,7 +1238,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1270,7 +1270,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32: @@ -1284,7 +1284,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1316,7 +1316,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32: @@ -1330,7 +1330,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32: @@ -1376,7 +1376,7 @@ i32 
%1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -120,7 +120,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -166,7 +166,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -212,7 +212,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -258,7 +258,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -304,7 +304,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -350,7 +350,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -396,7 +396,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -442,7 +442,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -488,7 +488,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -534,7 +534,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -580,7 +580,7 @@ , , , - i64); + i64, 
i64); define @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -626,7 +626,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -672,7 +672,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -718,7 +718,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: @@ -732,7 +732,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -764,7 +764,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: @@ -778,7 +778,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -810,7 +810,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: @@ -824,7 +824,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -948,7 +948,7 @@ i8, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -994,7 +994,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: @@ -1008,7 +1008,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1040,7 +1040,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: @@ -1054,7 +1054,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1086,7 +1086,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: @@ -1100,7 +1100,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - 
i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1224,7 +1224,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32: @@ -1238,7 +1238,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1270,7 +1270,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32: @@ -1284,7 +1284,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1316,7 +1316,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32: @@ -1330,7 +1330,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i64); + i64, i64); define @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll @@ -28,7 +28,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -74,7 +74,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -120,7 +120,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: @@ -134,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -166,7 +166,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: @@ -180,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -212,7 +212,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: @@ -226,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -258,7 +258,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: @@ -272,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -304,7 +304,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: @@ -318,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -350,7 +350,7 @@ , , , - i32); + i32, i32); define 
@intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: @@ -364,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -396,7 +396,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: @@ -410,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -442,7 +442,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: @@ -456,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -488,7 +488,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: @@ -502,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -534,7 +534,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: @@ -548,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -580,7 +580,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: @@ -594,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -626,7 +626,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: @@ -640,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -672,7 +672,7 @@ , , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: @@ -686,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -718,7 +718,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8: @@ -732,7 +732,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -764,7 +764,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8: @@ -778,7 +778,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -810,7 +810,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8: @@ -824,7 +824,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -856,7 +856,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8: @@ -870,7 +870,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -902,7 +902,7 @@ i8, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8: @@ -916,7 +916,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -948,7 +948,7 
@@ i8, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8: @@ -962,7 +962,7 @@ i8 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -994,7 +994,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16: @@ -1008,7 +1008,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1040,7 +1040,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16: @@ -1054,7 +1054,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1086,7 +1086,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16: @@ -1100,7 +1100,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1132,7 +1132,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16: @@ -1146,7 +1146,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1178,7 +1178,7 @@ i16, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16: @@ -1192,7 +1192,7 @@ i16 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1224,7 +1224,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32: @@ -1238,7 +1238,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1270,7 +1270,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32: @@ -1284,7 +1284,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1316,7 +1316,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32: @@ -1330,7 +1330,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } @@ -1362,7 +1362,7 @@ i32, , , - i32); + i32, i32); define @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32: @@ -1376,7 +1376,7 @@ i32 %1, %2, %3, - i32 %4) + i32 %4, i32 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll @@ -28,7 +28,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: @@ -42,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) ret %a } @@ -74,7 +74,7 @@ , , , - i64); + i64, i64); define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: @@ -88,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 0) 
   ret <vscale x 2 x i16> %a
 }

[... remaining vwmaccsu-rv64.ll hunks: for every masked vwmaccsu.vv and vwmaccsu.vx variant from nxv4i16_nxv4i8_nxv4i8 through nxv8i64_i32_nxv8i32, the intrinsic declaration's trailing "i64);" becomes "i64, i64);" and the call's trailing "i64 %4)" becomes "i64 %4, i64 0)" ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -28,7 +28,7 @@
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }

[... the same two-line change is applied to every other masked vwmaccu.vv and vwmaccu.vx variant in this file, through nxv8i64_i32_nxv8i32: the i32 VL operand gains a trailing i32 policy operand in the declaration, and every call passes i32 0 for it ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll

[... identical updates for the RV64 version: every masked vwmaccu.vv and vwmaccu.vx declaration changes "i64);" to "i64, i64);" and every call changes "i64 %4)" to "i64 %4, i64 0)", for all variants from nxv1i16_nxv1i8_nxv1i8 through nxv8i64_i32_nxv8i32 ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll

[... vwmaccus has only .vx forms; every masked vwmaccus.vx declaration changes "i32);" to "i32, i32);" and every call changes "i32 %4)" to "i32 %4, i32 0)", for all variants from nxv1i16_i8_nxv1i8 through nxv8i64_i32_nxv8i32 ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll

[... same change for the RV64 version: every masked vwmaccus.vx declaration changes "i64);" to "i64, i64);" and every call changes "i64 %4)" to "i64 %4, i64 0)", for all variants from nxv1i16_i8_nxv1i8 through nxv8i64_i32_nxv8i32 ...]
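
The test updates above all encode the same interface change: each masked multiply-add intrinsic gains one extra XLEN-sized operand (the policy operand), and the regenerated tests pass a constant 0 for it. A minimal sketch of the resulting declaration and call shape, assuming RV64 (i64 XLEN), the nxv1i16/nxv1i8 element types used by the first vwmaccu test, and the usual result/op1/XLEN name mangling (the name and wrapper function below are illustrative, not taken from the patch):

; Sketch only: masked vwmaccu after this change.
declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.i64(
  <vscale x 1 x i16>,  ; accumulator (also the value kept in masked-off lanes)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2
  <vscale x 1 x i1>,   ; mask
  i64,                 ; vl
  i64)                 ; new policy operand; the regenerated tests pass 0

define <vscale x 1 x i16> @example(<vscale x 1 x i16> %acc, <vscale x 1 x i8> %op1,
                                   <vscale x 1 x i8> %op2, <vscale x 1 x i1> %mask,
                                   i64 %vl) {
  ; Same call shape the updated tests check for, with i64 0 as the policy argument.
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.i64(
      <vscale x 1 x i16> %acc, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2,
      <vscale x 1 x i1> %mask, i64 %vl, i64 0)
  ret <vscale x 1 x i16> %r
}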