diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2178,13 +2178,7 @@
 }] in
 def vxrm_enum : RVVHeader;
 
-// 13.1. Vector Single-Width Saturating Add and Subtract
 let UnMaskedPolicyScheme = HasPassthruOperand in {
-defm vsaddu : RVVUnsignedBinBuiltinSet;
-defm vsadd : RVVSignedBinBuiltinSet;
-defm vssubu : RVVUnsignedBinBuiltinSet;
-defm vssub : RVVSignedBinBuiltinSet;
-
   let ManualCodegen = [{
       {
        // LLVM intrinsic
@@ -2220,6 +2214,13 @@
        return Builder.CreateCall(F, Operands, "");
      }
 }] in {
+
+  // 13.1. Vector Single-Width Saturating Add and Subtract
+  defm vsaddu : RVVUnsignedBinBuiltinSetRoundingMode;
+  defm vsadd : RVVSignedBinBuiltinSetRoundingMode;
+  defm vssubu : RVVUnsignedBinBuiltinSetRoundingMode;
+  defm vssub : RVVSignedBinBuiltinSetRoundingMode;
+
   // 13.2. Vector Single-Width Averaging Add and Subtract
   defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
   defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c
@@ -9,880 +9,880 @@
 // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8
 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i8mf8(op1, op2, vl);
+  return __riscv_vsadd_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
 }
 
 // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8
 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i8mf8(op1, op2, vl);
+  return __riscv_vsadd_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
 }
 
 // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4
 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i8mf4(op1, op2, vl);
+  return __riscv_vsadd_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
 }
 
 // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4
 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4(op1, op2, vl); + return __riscv_vsadd_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2(op1, op2, vl); + return __riscv_vsadd_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2(op1, op2, vl); + return __riscv_vsadd_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1(op1, op2, vl); + return __riscv_vsadd_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1(op1, op2, vl); + return __riscv_vsadd_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, 
vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2(op1, op2, vl); + return __riscv_vsadd_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2(op1, op2, vl); + return __riscv_vsadd_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4(op1, op2, vl); + return __riscv_vsadd_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4(op1, op2, vl); + return __riscv_vsadd_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8(op1, op2, vl); + return __riscv_vsadd_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8(op1, op2, vl); + return __riscv_vsadd_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4(op1, op2, vl); + return __riscv_vsadd_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4(op1, op2, vl); + return __riscv_vsadd_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2(op1, op2, vl); + return __riscv_vsadd_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2(op1, op2, vl); + return __riscv_vsadd_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1(op1, op2, vl); + return __riscv_vsadd_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1(op1, op2, vl); + return __riscv_vsadd_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2(op1, op2, vl); + return __riscv_vsadd_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2(op1, op2, vl); + return __riscv_vsadd_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4(op1, op2, vl); + return __riscv_vsadd_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4(op1, op2, vl); + return __riscv_vsadd_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8(op1, op2, vl); + return __riscv_vsadd_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8(op1, op2, vl); + return __riscv_vsadd_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2(op1, op2, vl); + return __riscv_vsadd_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2(op1, op2, vl); + return __riscv_vsadd_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1(op1, op2, vl); + return __riscv_vsadd_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1(op1, op2, vl); + return __riscv_vsadd_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2(op1, op2, vl); + return __riscv_vsadd_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2(op1, op2, vl); + return __riscv_vsadd_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4(op1, op2, vl); + return __riscv_vsadd_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4(op1, op2, vl); + return __riscv_vsadd_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8(op1, op2, vl); + return __riscv_vsadd_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8(op1, op2, vl); + return __riscv_vsadd_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1 // 
CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1(op1, op2, vl); + return __riscv_vsadd_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1(op1, op2, vl); + return __riscv_vsadd_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2(op1, op2, vl); + return __riscv_vsadd_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2(op1, op2, vl); + return __riscv_vsadd_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4(op1, op2, vl); + return __riscv_vsadd_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( poison, [[OP1]], i64 
[[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4(op1, op2, vl); + return __riscv_vsadd_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8(op1, op2, vl); + return __riscv_vsadd_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8(op1, op2, vl); + return __riscv_vsadd_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( 
poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef 
signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_m(mask, op1, op2, vl); + return 
__riscv_vsadd_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vsadd_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8(op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8(op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4(op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4(op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2(op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2(op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1(op1, op2, vl); + return 
__riscv_vsaddu_vv_u8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1(op1, op2, vl); + return __riscv_vsaddu_vx_u8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2(op1, op2, vl); + return __riscv_vsaddu_vv_u8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2(op1, op2, vl); + return __riscv_vsaddu_vx_u8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4(op1, op2, vl); + return __riscv_vsaddu_vv_u8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4(op1, op2, vl); + return __riscv_vsaddu_vx_u8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( 
poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8(op1, op2, vl); + return __riscv_vsaddu_vv_u8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8(op1, op2, vl); + return __riscv_vsaddu_vx_u8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4(op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4(op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2(op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, 
uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2(op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1(op1, op2, vl); + return __riscv_vsaddu_vv_u16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1(op1, op2, vl); + return __riscv_vsaddu_vx_u16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2(op1, op2, vl); + return __riscv_vsaddu_vv_u16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2(op1, op2, vl); + return __riscv_vsaddu_vx_u16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4(op1, op2, vl); + return __riscv_vsaddu_vv_u16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4(op1, op2, vl); + return __riscv_vsaddu_vx_u16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8(op1, op2, vl); + return __riscv_vsaddu_vv_u16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8(op1, op2, vl); + return __riscv_vsaddu_vx_u16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2(op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2(op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1(op1, op2, vl); + return __riscv_vsaddu_vv_u32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1(op1, op2, vl); + return __riscv_vsaddu_vx_u32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2(op1, op2, vl); + return __riscv_vsaddu_vv_u32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2(op1, op2, vl); + return __riscv_vsaddu_vx_u32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4(op1, op2, vl); + return __riscv_vsaddu_vv_u32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4(op1, op2, vl); + return 
__riscv_vsaddu_vx_u32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8(op1, op2, vl); + return __riscv_vsaddu_vv_u32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8(op1, op2, vl); + return __riscv_vsaddu_vx_u32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1(op1, op2, vl); + return __riscv_vsaddu_vv_u64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1(op1, op2, vl); + return __riscv_vsaddu_vx_u64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2(op1, op2, vl); + return __riscv_vsaddu_vv_u64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsaddu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2(op1, op2, vl); + return __riscv_vsaddu_vx_u64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4(op1, op2, vl); + return __riscv_vsaddu_vv_u64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4(op1, op2, vl); + return __riscv_vsaddu_vx_u64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8(op1, op2, vl); + return __riscv_vsaddu_vv_u64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8(op1, op2, vl); + return __riscv_vsaddu_vx_u64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vsaddu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, 
[[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8(op1, op2, vl); + return __riscv_vssub_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8(op1, op2, vl); + return __riscv_vssub_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4(op1, op2, vl); + return __riscv_vssub_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4(op1, op2, vl); + return __riscv_vssub_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2 // 
CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2(op1, op2, vl); + return __riscv_vssub_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2(op1, op2, vl); + return __riscv_vssub_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1(op1, op2, vl); + return __riscv_vssub_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1(op1, op2, vl); + return __riscv_vssub_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2(op1, op2, vl); + return __riscv_vssub_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 
[[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2(op1, op2, vl); + return __riscv_vssub_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4(op1, op2, vl); + return __riscv_vssub_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4(op1, op2, vl); + return __riscv_vssub_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8(op1, op2, vl); + return __riscv_vssub_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8(op1, op2, vl); + return __riscv_vssub_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4(op1, op2, vl); + return __riscv_vssub_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4(op1, op2, vl); + return __riscv_vssub_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2(op1, op2, vl); + return __riscv_vssub_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2(op1, op2, vl); + return __riscv_vssub_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1(op1, op2, vl); + return __riscv_vssub_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1(op1, op2, vl); + return __riscv_vssub_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 
0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2(op1, op2, vl); + return __riscv_vssub_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2(op1, op2, vl); + return __riscv_vssub_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4(op1, op2, vl); + return __riscv_vssub_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4(op1, op2, vl); + return __riscv_vssub_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8(op1, op2, vl); + return __riscv_vssub_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8(op1, op2, vl); + return __riscv_vssub_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssub_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2(op1, op2, vl); + return __riscv_vssub_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2(op1, op2, vl); + return __riscv_vssub_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1(op1, op2, vl); + return __riscv_vssub_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1(op1, op2, vl); + return __riscv_vssub_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2(op1, op2, vl); + return __riscv_vssub_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssub.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2(op1, op2, vl); + return __riscv_vssub_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4(op1, op2, vl); + return __riscv_vssub_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4(op1, op2, vl); + return __riscv_vssub_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8(op1, op2, vl); + return __riscv_vssub_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8(op1, op2, vl); + return __riscv_vssub_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1(op1, op2, vl); + return __riscv_vssub_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1(op1, op2, vl); + return __riscv_vssub_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2(op1, op2, vl); + return __riscv_vssub_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2(op1, op2, vl); + return __riscv_vssub_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4(op1, op2, vl); + return __riscv_vssub_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4(op1, op2, vl); + return __riscv_vssub_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8(op1, op2, vl); + return __riscv_vssub_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8(op1, op2, vl); + return __riscv_vssub_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_m(mask, op1, op2, vl); + return 
__riscv_vssub_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, 
size_t vl) { - return __riscv_vssub_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8(op1, op2, vl); + return __riscv_vssubu_vv_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8(op1, op2, vl); + return __riscv_vssubu_vx_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4(op1, op2, vl); + return __riscv_vssubu_vv_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4(op1, op2, vl); + return __riscv_vssubu_vx_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2(op1, op2, vl); + return __riscv_vssubu_vv_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2(op1, op2, vl); + return __riscv_vssubu_vx_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1(op1, op2, vl); + return __riscv_vssubu_vv_u8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t 
op2, size_t vl) { - return __riscv_vssubu_vx_u8m1(op1, op2, vl); + return __riscv_vssubu_vx_u8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2(op1, op2, vl); + return __riscv_vssubu_vv_u8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2(op1, op2, vl); + return __riscv_vssubu_vx_u8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4(op1, op2, vl); + return __riscv_vssubu_vv_u8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4(op1, op2, vl); + return __riscv_vssubu_vx_u8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8(op1, op2, vl); + return __riscv_vssubu_vv_u8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8(op1, op2, vl); + return __riscv_vssubu_vx_u8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4(op1, op2, vl); + return __riscv_vssubu_vv_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4(op1, op2, vl); + return __riscv_vssubu_vx_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2(op1, op2, vl); + return __riscv_vssubu_vv_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2(op1, op2, vl); + return __riscv_vssubu_vx_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1(op1, op2, vl); + return __riscv_vssubu_vv_u16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1(op1, op2, vl); + return __riscv_vssubu_vx_u16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2(op1, op2, vl); + return __riscv_vssubu_vv_u16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2(op1, op2, vl); + return __riscv_vssubu_vx_u16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4(op1, op2, vl); + return __riscv_vssubu_vv_u16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4(op1, op2, vl); + return __riscv_vssubu_vx_u16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssubu_vv_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8(op1, op2, vl); + return __riscv_vssubu_vv_u16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8(op1, op2, vl); + return __riscv_vssubu_vx_u16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2(op1, op2, vl); + return __riscv_vssubu_vv_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2(op1, op2, vl); + return __riscv_vssubu_vx_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1(op1, op2, vl); + return __riscv_vssubu_vv_u32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( poison, 
[[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1(op1, op2, vl); + return __riscv_vssubu_vx_u32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2(op1, op2, vl); + return __riscv_vssubu_vv_u32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2(op1, op2, vl); + return __riscv_vssubu_vx_u32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4(op1, op2, vl); + return __riscv_vssubu_vv_u32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4(op1, op2, vl); + return __riscv_vssubu_vx_u32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) 
{ - return __riscv_vssubu_vv_u32m8(op1, op2, vl); + return __riscv_vssubu_vv_u32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8(op1, op2, vl); + return __riscv_vssubu_vx_u32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1(op1, op2, vl); + return __riscv_vssubu_vv_u64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1(op1, op2, vl); + return __riscv_vssubu_vx_u64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2(op1, op2, vl); + return __riscv_vssubu_vv_u64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2(op1, op2, vl); + return __riscv_vssubu_vx_u64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4(op1, op2, vl); + return __riscv_vssubu_vv_u64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4(op1, op2, vl); + return __riscv_vssubu_vx_u64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8(op1, op2, vl); + return __riscv_vssubu_vv_u64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8(op1, op2, vl); + return __riscv_vssubu_vx_u64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssubu_vx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, 
vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_m(mask, op1, op2, 
vl); + return __riscv_vssubu_vx_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2 
// CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, 
vint8m4_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsadd_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 
[[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return 
__riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(op1, op2, vl); + return __riscv_vsadd(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], 
[[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vsadd_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd(mask, op1, op2, vl); + return __riscv_vsadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1 // 
CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
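// Editor's sketch (not part of the autogenerated diff): how calling code migrates to
// the updated saturating-add intrinsics exercised by these tests. After this patch the
// overloaded __riscv_vsaddu / __riscv_vsadd forms take an explicit fixed-point
// rounding-mode argument (e.g. __RISCV_VXRM_RNU) immediately before the vl operand,
// where the old calls were __riscv_vsaddu(op1, op2, vl). The function name below and
// the assumption that <riscv_vector.h> comes from a toolchain carrying this patch are
// illustrative, not content of the patch itself.
#include <riscv_vector.h>

vuint8m1_t saturating_add_u8(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
  // Masked, overloaded vector-scalar form: mask, operands, rounding mode, vector length.
  return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}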
vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { 
- return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(op1, 
op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); + return __riscv_vsaddu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return 
__riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t 
op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], 
[[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsaddu_vx_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); + return 
__riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return __riscv_vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return __riscv_vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return __riscv_vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return __riscv_vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c
@@ -9,880 +9,880 @@
// CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8
// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( 
poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return 
__riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); + return __riscv_vssub(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return 
__riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub(mask, 
op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); + return __riscv_vssub(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssubu_vx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t 
vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( 
poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); + return __riscv_vssubu(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( 
poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vssubu_vx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, 
op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t 
op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(mask, op1, op2, vl); + return __riscv_vssubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_tu(maskedoff, op1, op2, vl); + 
return __riscv_vsadd_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsadd_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_tu(maskedoff, op1, 
op2, vl); + return __riscv_vsadd_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) 
{ - return __riscv_vsadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, 
vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsadd_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vsadd_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, 
size_t vl) { - return __riscv_vsadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t 
mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, 
vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_mu(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_mu(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return 
__riscv_vsadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsaddu_vx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vsaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t 
op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, 
vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t 
mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_tum(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vsaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t 
op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_mu // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - 
return __riscv_vsaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsaddu_vx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], 
i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return 
__riscv_vsaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c
@@ -9,1760 +9,1760 @@
// CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vssub_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vssub_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vssub_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vssub_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_tu
// CHECK-RV64-SAME: (
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_tu(maskedoff, op1, op2, 
vl); + return __riscv_vssub_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssub_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return 
__riscv_vssub_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) 
{ - return __riscv_vssub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_tum(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t 
vl) { - return __riscv_vssub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssub_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( 
[[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, 
size_t vl) { - return __riscv_vssub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssub_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssub_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vssub_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - 
return __riscv_vssub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssubu_vv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, 
vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_tu(maskedoff, 
op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, 
vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_tum(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vssubu_vv_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_tumu(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return 
__riscv_vssubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vssubu_vx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( 
[[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vssubu_vx_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 
1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], 
i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsadd_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, 
size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( 
[[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsadd_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, 
op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vsaddu_vv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( 
[[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, 
vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, 
size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[MASKEDOFF]], 
[[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[MASKEDOFF]], 
[[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( 
[[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, 
vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 
[[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, 
vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - 
return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( 
[[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_tumu(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return 
__riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_mu // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 
[[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef 
zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssubu_vv_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, 
op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], 
[[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t 
vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } 
// CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1305,10 +1305,10 @@ defm vfwadd_w : RISCVBinaryAAX; defm vfwsub_w : RISCVBinaryAAX; - defm vsaddu : RISCVSaturatingBinaryAAX; - defm vsadd : RISCVSaturatingBinaryAAX; - defm vssubu : RISCVSaturatingBinaryAAX; - defm vssub : RISCVSaturatingBinaryAAX; + defm vsaddu : RISCVSaturatingBinaryAAXRoundingMode; + defm vsadd : RISCVSaturatingBinaryAAXRoundingMode; + defm vssubu : RISCVSaturatingBinaryAAXRoundingMode; + defm vssub : RISCVSaturatingBinaryAAXRoundingMode; defm vmerge : RISCVBinaryWithV0; diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp @@ -78,9 +78,13 @@ bool Changed = false; for (MachineInstr &MI : MBB) { if (auto RoundModeIdx = getRoundModeIdx(MI)) { - Changed = true; - unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm(); + + // The value '99' is a hint to this pass to not alter the vxrm value. 
+ unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm(); + if (VXRMImm == 99) + continue; + Changed = true; BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm)) .addImm(VXRMImm); MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -2487,7 +2487,7 @@ } } -multiclass VPseudoVSALU_VV_VX_VI { +multiclass VPseudoVSALU_VV_VX_VI_RM { foreach m = MxList in { defvar mx = m.MX; defvar WriteVSALUV_MX = !cast("WriteVSALUV_" # mx); @@ -2496,11 +2496,11 @@ defvar ReadVSALUV_MX = !cast("ReadVSALUV_" # mx); defvar ReadVSALUX_MX = !cast("ReadVSALUX_" # mx); - defm "" : VPseudoBinaryV_VV, + defm "" : VPseudoBinaryV_VV_RM, Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VX, + defm "" : VPseudoBinaryV_VX_RM, Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VI, + defm "" : VPseudoBinaryV_VI_RM, Sched<[WriteVSALUI_MX, ReadVSALUV_MX, ReadVMask]>; } } @@ -2560,7 +2560,7 @@ } } -multiclass VPseudoVSALU_VV_VX { +multiclass VPseudoVSALU_VV_VX_RM { foreach m = MxList in { defvar mx = m.MX; defvar WriteVSALUV_MX = !cast("WriteVSALUV_" # mx); @@ -2568,9 +2568,9 @@ defvar ReadVSALUV_MX = !cast("ReadVSALUV_" # mx); defvar ReadVSALUX_MX = !cast("ReadVSALUX_" # mx); - defm "" : VPseudoBinaryV_VV, + defm "" : VPseudoBinaryV_VV_RM, Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VX, + defm "" : VPseudoBinaryV_VX_RM, Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>; } } @@ -5866,10 +5866,10 @@ // 12.1. Vector Single-Width Saturating Add and Subtract //===----------------------------------------------------------------------===// let Defs = [VXSAT], hasSideEffects = 1 in { - defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI; - defm PseudoVSADD : VPseudoVSALU_VV_VX_VI; - defm PseudoVSSUBU : VPseudoVSALU_VV_VX; - defm PseudoVSSUB : VPseudoVSALU_VV_VX; + defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI_RM; + defm PseudoVSADD : VPseudoVSALU_VV_VX_VI_RM; + defm PseudoVSSUBU : VPseudoVSALU_VV_VX_RM; + defm PseudoVSSUB : VPseudoVSALU_VV_VX_RM; } //===----------------------------------------------------------------------===// @@ -6531,10 +6531,14 @@ //===----------------------------------------------------------------------===// // 12.1. Vector Single-Width Saturating Add and Subtract //===----------------------------------------------------------------------===// -defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>; -defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>; -defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>; -defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vsaddu", "PseudoVSADDU", + AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vsadd", "PseudoVSADD", + AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vssubu", "PseudoVSSUBU", + AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vssub", "PseudoVSSUB", + AllIntegerVectors>; //===----------------------------------------------------------------------===// // 12.2.
Vector Single-Width Averaging Add and Subtract diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -92,6 +92,29 @@ op_reg_class:$rs2, avl, log2sew)>; +class VPatBinarySDNode_VV_RM : + Pat<(result_type (vop + (op_type op_reg_class:$rs1), + (op_type op_reg_class:$rs2))), + (!cast( + !if(isSEWAware, + instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_VV_"# vlmul.MX)) + op_reg_class:$rs1, + op_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInertReadWriteCSR + (XLenVT 99), + avl, log2sew)>; + class VPatBinarySDNode_XI; +class VPatBinarySDNode_XI_RM : + Pat<(result_type (vop + (vop_type vop_reg_class:$rs1), + (vop_type (SplatPatKind xop_kind:$rs2)))), + (!cast( + !if(isSEWAware, + instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#_#suffix#_# vlmul.MX)) + vop_reg_class:$rs1, + xop_kind:$rs2, + // Value to indicate no rounding mode change in + // RISCVInertReadWriteCSR + (XLenVT 99), + avl, log2sew)>; + multiclass VPatBinarySDNode_VV_VX vtilist = AllIntegerVectors, bit isSEWAware = 0> { @@ -144,6 +193,35 @@ } } +multiclass VPatBinarySDNode_VV_VX_RM vtilist = AllIntegerVectors, + bit isSEWAware = 0> { + foreach vti = vtilist in { + let Predicates = GetVTypePredicates.Predicates in { + def : VPatBinarySDNode_VV_RM; + def : VPatBinarySDNode_XI_RM; + } + } +} + +multiclass VPatBinarySDNode_VV_VX_VI_RM + : VPatBinarySDNode_VV_VX_RM { + foreach vti = AllIntegerVectors in { + let Predicates = GetVTypePredicates.Predicates in + def : VPatBinarySDNode_XI_RM(SplatPat#_#ImmType), + ImmType>; + } +} + class VPatBinarySDNode_VF; -defm : VPatBinarySDNode_VV_VX_VI; -defm : VPatBinarySDNode_VV_VX; -defm : VPatBinarySDNode_VV_VX; +defm : VPatBinarySDNode_VV_VX_VI_RM; +defm : VPatBinarySDNode_VV_VX_VI_RM; +defm : VPatBinarySDNode_VV_VX_RM; +defm : VPatBinarySDNode_VV_VX_RM; // 15. 
Vector Mask Instructions diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -589,6 +589,39 @@ op2_reg_class:$rs2, (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; +class VPatBinaryVL_V_RM + : Pat<(result_type (vop + (op1_type op1_reg_class:$rs1), + (op2_type op2_reg_class:$rs2), + (result_type result_reg_class:$merge), + (mask_type V0), + VLOpFrag)), + (!cast( + !if(isSEWAware, + instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInertReadWriteCSR + (XLenVT 99), + GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + + multiclass VPatTiedBinaryNoMaskVL_V; +class VPatBinaryVL_XI_RM + : Pat<(result_type (vop + (vop1_type vop_reg_class:$rs1), + (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), + (result_type result_reg_class:$merge), + (mask_type V0), + VLOpFrag)), + (!cast( + !if(isSEWAware, + instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#_#suffix#_#vlmul.MX#"_MASK")) + result_reg_class:$merge, + vop_reg_class:$rs1, + xop_kind:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInertReadWriteCSR + (XLenVT 99), + GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + + multiclass VPatBinaryVL_VV_VX vtilist = AllIntegerVectors, bit isSEWAware = 0> { @@ -682,6 +749,37 @@ } } +multiclass VPatBinaryVL_VV_VX_RM vtilist = AllIntegerVectors, + bit isSEWAware = 0> { + foreach vti = vtilist in { + let Predicates = GetVTypePredicates.Predicates in { + def : VPatBinaryVL_V_RM; + def : VPatBinaryVL_XI_RM; + } + } +} + +multiclass VPatBinaryVL_VV_VX_VI_RM + : VPatBinaryVL_VV_VX_RM { + foreach vti = AllIntegerVectors in { + let Predicates = GetVTypePredicates.Predicates in + def : VPatBinaryVL_XI_RM(SplatPat#_#ImmType), + ImmType>; + } +} + + multiclass VPatBinaryWVL_VV_VX { foreach VtiToWti = AllWidenableIntVectors in { defvar vti = VtiToWti.Vti; @@ -1814,10 +1912,10 @@ // 12. Vector Fixed-Point Arithmetic Instructions // 12.1. Vector Single-Width Saturating Add and Subtract -defm : VPatBinaryVL_VV_VX_VI; -defm : VPatBinaryVL_VV_VX_VI; -defm : VPatBinaryVL_VV_VX; -defm : VPatBinaryVL_VV_VX; +defm : VPatBinaryVL_VV_VX_VI_RM; +defm : VPatBinaryVL_VV_VX_VI_RM; +defm : VPatBinaryVL_VV_VX_RM; +defm : VPatBinaryVL_VV_VX_RM; // 13. 
Vector Floating-Point Instructions diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -20,10 +20,9 @@ ; CHECK-NEXT: .cfi_offset ra, -8 ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 24 * vlenb +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb ; CHECK-NEXT: li a0, 55 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 @@ -41,48 +40,27 @@ ; CHECK-NEXT: li s0, 36 ; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v16, v8, v8, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: call func@plt -; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vrgather.vv v4, v8, v8, v0.t ; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 2 -; CHECK-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: add a1, a1, a2 -; CHECK-NEXT: vl4r.v v28, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vfwsub.wv v16, v8, v24 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vssubu.vv v4, v4, v8, v0.t -; CHECK-NEXT: vsetvli zero, s0, e32, m8, tu, mu ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 2 +; CHECK-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfwsub.wv v8, v24, v16 +; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, mu +; CHECK-NEXT: vfdiv.vv v8, v24, v8, v0.t ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -121,25 +99,20 @@ ; SUBREGLIVENESS-NEXT: addi a0, sp, 16 ; SUBREGLIVENESS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; SUBREGLIVENESS-NEXT: call func@plt -; SUBREGLIVENESS-NEXT: li a0, 32 -; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; 
SUBREGLIVENESS-NEXT: vrgather.vv v16, v8, v8, v0.t ; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma +; SUBREGLIVENESS-NEXT: csrr a0, vlenb +; SUBREGLIVENESS-NEXT: slli a0, a0, 3 +; SUBREGLIVENESS-NEXT: add a0, sp, a0 +; SUBREGLIVENESS-NEXT: addi a0, a0, 16 ; SUBREGLIVENESS-NEXT: csrr a1, vlenb -; SUBREGLIVENESS-NEXT: slli a1, a1, 3 -; SUBREGLIVENESS-NEXT: add a1, sp, a1 -; SUBREGLIVENESS-NEXT: addi a1, a1, 16 -; SUBREGLIVENESS-NEXT: csrr a2, vlenb -; SUBREGLIVENESS-NEXT: slli a2, a2, 2 -; SUBREGLIVENESS-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload -; SUBREGLIVENESS-NEXT: add a1, a1, a2 -; SUBREGLIVENESS-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload -; SUBREGLIVENESS-NEXT: addi a1, sp, 16 -; SUBREGLIVENESS-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload -; SUBREGLIVENESS-NEXT: vfwsub.wv v8, v24, v20 -; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; SUBREGLIVENESS-NEXT: vssubu.vv v16, v16, v8, v0.t -; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e32, m8, tu, mu +; SUBREGLIVENESS-NEXT: slli a1, a1, 2 +; SUBREGLIVENESS-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload +; SUBREGLIVENESS-NEXT: add a0, a0, a1 +; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload +; SUBREGLIVENESS-NEXT: addi a0, sp, 16 +; SUBREGLIVENESS-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; SUBREGLIVENESS-NEXT: vfwsub.wv v8, v24, v16 +; SUBREGLIVENESS-NEXT: vsetvli zero, zero, e32, m8, tu, mu ; SUBREGLIVENESS-NEXT: vfdiv.vv v8, v24, v8, v0.t ; SUBREGLIVENESS-NEXT: vse32.v v8, (a0) ; SUBREGLIVENESS-NEXT: csrr a0, vlenb @@ -156,7 +129,7 @@ call void @func() %i3 = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( poison, poison, poison, poison, i64 32, i64 0) %i4 = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( poison, %i2, %i1, i64 36) - %i5 = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( %i3, %i3, poison, poison, i64 32, i64 0) + %i5 = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( %i3, %i3, poison, poison, i64 0, i64 32, i64 0) %i6 = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( %i4, %i2, poison, poison, i64 36, i64 0) call void @llvm.riscv.vse.nxv16f32.i64( %i6, * nonnull poison, i64 36) ret void @@ -167,6 +140,6 @@ declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(, , , , i64, i64 immarg) declare @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(, , , , i64, i64 immarg) declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(, , , i64) -declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(, , , , i64, i64 immarg) +declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(, , , , i64, i64, i64 immarg) declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(, , , , i64, i64 immarg) declare void @llvm.riscv.vse.nxv16f32.i64(, * nocapture, i64) #3 diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -485,12 +485,14 @@ , , iXLen, + iXLen, iXLen); define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -499,7 +501,7 @@ %0, %1, %2, - iXLen %3, iXLen 3) + iXLen 0, iXLen %3, iXLen 3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -460,11 +460,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +475,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -460,11 +460,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +475,7 @@ %1, %2, %3, - iXLen %4, iXLen 2) + iXLen 0, iXLen %4, iXLen 2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -460,11 +460,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +475,7 @@ %1, %2, %3, - iXLen %4, iXLen 0) + iXLen 0, iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll @@ -849,26 +849,25 @@ define void @test_dag_loop() { ; CHECK-LABEL: test_dag_loop: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma -; CHECK-NEXT: vle16.v v8, (zero) ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vsetivli zero, 0, e8, m4, tu, mu -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vssubu.vx v20, v16, zero, v0.t +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vssubu.vx v12, v8, zero, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v20, v16 +; CHECK-NEXT: vmseq.vv v0, v12, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv.v.i v16, 0 -; CHECK-NEXT: vsetivli zero, 1, e16, m8, tu, ma -; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetivli zero, 1, e16, m8, tu, mu +; CHECK-NEXT: vle16.v v8, (zero), v0.t ; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma -; CHECK-NEXT: vse16.v v16, (zero) +; CHECK-NEXT: vse16.v v8, (zero) ; CHECK-NEXT: ret entry: %0 = call @llvm.riscv.vle.nxv32i16.i64( undef, * null, i64 1) - %1 = tail call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( zeroinitializer, zeroinitializer, i8 0, zeroinitializer, i64 0, i64 0) + %1 = tail call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( zeroinitializer, zeroinitializer, i8 0, zeroinitializer, i64 0, i64 0, i64 0) %2 = tail call 
@llvm.riscv.vmseq.nxv32i8.nxv32i8.i64( %1, zeroinitializer, i64 0) %3 = tail call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( zeroinitializer, zeroinitializer, %0, %2, i64 1) call void @llvm.riscv.vse.nxv32i16.i64( %3, * null, i64 0) @@ -876,7 +875,7 @@ } declare @llvm.riscv.vle.nxv32i16.i64(, * nocapture, i64) -declare @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(, , i8, , i64, i64 immarg) +declare @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(, , i8, , i64, i64, i64 immarg) declare @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(, , i64) declare @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(, , , , i64) declare void @llvm.riscv.vse.nxv32i16.i64(, * nocapture, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -1186,6 +1186,7 @@ , , i64, + iXLen, iXLen); define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { @@ -1198,6 +1199,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vsadd.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1205,6 +1207,7 @@ ; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vsadd.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1212,7 +1215,7 @@ %0, %1, i64 %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1221,12 +1224,14 @@ , , , + iXLen, iXLen); define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1234,7 +1239,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1418,12 +1423,14 @@ , , , + iXLen, iXLen); define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1431,7 +1438,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1440,12 +1447,14 @@ , , , + iXLen, iXLen); define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1453,7 +1462,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1462,6 +1471,7 @@ , , i64, + iXLen, iXLen); define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { @@ -1474,6 +1484,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vssub.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1481,6 +1492,7 @@ ; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vssub.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1488,7 +1500,7 @@ %0, %1, i64 %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ 
-1497,6 +1509,7 @@ , , i64, + iXLen, iXLen); define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { @@ -1509,6 +1522,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vssubu.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1516,6 +1530,7 @@ ; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vssubu.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1523,7 +1538,7 @@ %0, %1, i64 %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -1,2848 +1,1291 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( - , - , - , - i32); -define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vsadd_vv_i8mf8( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i8.nxv1i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vsadd_vx_i8mf8( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv1i8.i8.i32(, , i8, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vsadd_vv_i8mf4( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i8.nxv2i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) -define 
@intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vsadd_vx_i8mf4( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv2i8.i8.i32(, , i8, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vsadd_vv_i8mf2( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vsadd_vx_i8mf2( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv4i8.i8.i32(, , i8, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vsadd_vv_i8m1( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vsadd_vx_i8m1( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 
+; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv8i8.i8.i32(, , i8, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vsadd_vv_i8m2( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vsadd_vx_i8m2( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv16i8.i8.i32(, , i8, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vsadd_vv_i8m4( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vsadd_vx_i8m4( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv32i8.i8.i32(, , i8, i32 immarg, i32) 
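; The rewritten checks above and below all follow one pattern: each saturating
; add/subtract intrinsic gains an XLen-typed immediate selecting the vxrm
; rounding mode (0 = rnu) ahead of the vl operand, and the masked form keeps
; its trailing policy immediate (3 = tail/mask agnostic). A small standalone
; RV32 sketch with the scalable-vector types written out in full; the function
; and value names here are illustrative only, not taken from the tests.
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 immarg, i32)
declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 1 x i8> @example_vsadd(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %vl) {
entry:
  ; Unmasked operand order: passthru, op1, op2, vxrm immediate, vl.
  %u = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 0, i32 %vl)
  ; Masked operand order: passthru, op1, op2, mask, vxrm immediate, vl, policy immediate.
  %r = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> %u, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 0, i32 %vl, i32 3)
  ret <vscale x 1 x i8> %r
}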
-define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vsadd_vv_i8m8( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vsadd_vx_i8m8( %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv64i8.i8.i32( poison, %op1, i8 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv64i8.i8.i32(, , i8, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vsadd_vv_i16mf4( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i16.nxv1i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vsadd_vx_i16mf4( %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i16.i16.i32( poison, %op1, i16 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv1i16.i16.i32(, , i16, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vsadd_vv_i16mf2( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; 
CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vsadd_vx_i16mf2( %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i16.i16.i32( poison, %op1, i16 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv2i16.i16.i32(, , i16, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vsadd_vv_i16m1( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vsadd_vx_i16m1( %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i16.i16.i32( poison, %op1, i16 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv4i16.i16.i32(, , i16, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vsadd_vv_i16m2( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - i32); +declare 
@llvm.riscv.vsadd.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vsadd_vx_i16m2( %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i16.i16.i32( poison, %op1, i16 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv8i16.i16.i32(, , i16, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vsadd_vv_i16m4( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vsadd_vx_i16m4( %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i16.i16.i32( poison, %op1, i16 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv16i16.i16.i32(, , i16, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vsadd_vv_i16m8( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vsadd_vx_i16m8( %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i16.i16.i32( poison, %op1, i16 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv32i16.i16.i32(, , i16, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vsadd_vv_i32mf2( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vsadd_vx_i32mf2( %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i32.i32.i32( poison, %op1, i32 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv1i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vsadd_vv_i32m1( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vsadd_vx_i32m1( %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, 
i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i32.i32.i32( poison, %op1, i32 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv2i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vsadd_vv_i32m2( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vsadd_vx_i32m2( %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i32.i32.i32( poison, %op1, i32 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv4i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vsadd_vv_i32m4( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vsadd_vx_i32m4( %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i32.i32.i32( poison, %op1, i32 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv8i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vsadd_vv_i32m8( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vsadd_vx_i32m8( %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i32.i32.i32( poison, %op1, i32 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv16i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vsadd_vv_i64m1( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i64.nxv1i64( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vsadd_vx_i64m1( %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v9, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i64.i64.i32( poison, %op1, i64 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv1i64.i64.i32(, , i64, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vsadd_vv_i64m2( %op1, %op2, i32 %vl) { +; 
CHECK-LABEL: test_vsadd_vv_i64m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i64.nxv2i64( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vsadd_vx_i64m2( %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i64.i64.i32( poison, %op1, i64 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv2i64.i64.i32(, , i64, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vsadd_vv_i64m4( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i64.nxv4i64( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vsadd_vx_i64m4( %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i64.i64.i32( poison, %op1, i64 %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( - , - , - , - i32); +declare @llvm.riscv.vsadd.nxv4i64.i64.i32(, , i64, i32 immarg, i32) -define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vsadd_vv_i64m8( %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i64.nxv8i64( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32( poison, %op1, %op2, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) -define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vsadd_vx_i64m8( %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16 +; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) + %0 = tail call @llvm.riscv.vsadd.nxv8i64.i64.i32( poison, %op1, i64 %op2, i32 0, i32 %vl) + ret %0 +} - ret %a +declare @llvm.riscv.vsadd.nxv8i64.i64.i32(, , i64, i32 immarg, i32) + +define @test_vsadd_vv_i8mf8_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf8_m: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8: +define @test_vsadd_vx_i8mf8_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8: +define @test_vsadd_vv_i8mf4_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - 
%a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8: +define @test_vsadd_vx_i8mf4_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8: +define @test_vsadd_vv_i8mf2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8: +define @test_vsadd_vx_i8mf2_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8: +define @test_vsadd_vv_i8m1_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32( poison, %op1, %op2, 
%mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8: +define @test_vsadd_vx_i8m1_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8: +define @test_vsadd_vv_i8m2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8: +define @test_vsadd_vx_i8m2_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv16i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8: +define @test_vsadd_vv_i8m4_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32(, , , , i32 
immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8: +define @test_vsadd_vx_i8m4_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv32i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8: +define @test_vsadd_vv_i8m8_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv64i8.i8( - , - , - i8, - i32); +declare @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8: +define @test_vsadd_vx_i8m8_m( %mask, %op1, i8 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv64i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv64i8.i8.i32( poison, %op1, i8 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv64i8.i8( - , - , - i8, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv64i8.i8.i32(, , i8, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8: +define @test_vsadd_vv_i16mf4_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i16.i16( - , - , - i16, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16: +define @test_vsadd_vx_i16mf4_m( %mask, %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i16.i16.i32( poison, %op1, i16 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i16.i16( - , - , - i16, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i16.i16.i32(, , i16, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16: +define @test_vsadd_vv_i16mf2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i16.i16( - , - , - i16, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16: +define @test_vsadd_vx_i16mf2_m( %mask, %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i16.i16.i32( poison, %op1, i16 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i16.i16( - , - , - i16, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i16.i16.i32(, , i16, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16: +define @test_vsadd_vv_i16m1_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i16.i16( - , - , - i16, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16: +define @test_vsadd_vx_i16m1_m( %mask, %op1, i16 
signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i16.i16.i32( poison, %op1, i16 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i16.i16( - , - , - i16, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i16.i16.i32(, , i16, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16: +define @test_vsadd_vv_i16m2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i16.i16( - , - , - i16, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16: +define @test_vsadd_vx_i16m2_m( %mask, %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i16.i16.i32( poison, %op1, i16 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i16.i16( - , - , - i16, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i16.i16.i32(, , i16, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16: +define @test_vsadd_vv_i16m4_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i16.i16( - , - , - i16, - i32); +declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16: +define @test_vsadd_vx_i16m4_m( %mask, %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m4_m: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i16.i16.i32( poison, %op1, i16 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i16.i16( - , - , - i16, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv16i16.i16.i32(, , i16, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16: +define @test_vsadd_vv_i16m8_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i16.i16( - , - , - i16, - i32); +declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16: +define @test_vsadd_vx_i16m8_m( %mask, %op1, i16 signext %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i16.i16.i32( poison, %op1, i16 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i16.i16( - , - , - i16, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv32i16.i16.i32(, , i16, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16: +define @test_vsadd_vv_i32mf2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i32.i32( - , - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32: +define @test_vsadd_vx_i32mf2_m( %mask, %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsadd.vx 
v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i32.i32.i32( poison, %op1, i32 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i32.i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32: +define @test_vsadd_vv_i32m1_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i32.i32( - , - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32: +define @test_vsadd_vx_i32m1_m( %mask, %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i32.i32.i32( poison, %op1, i32 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i32.i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32: +define @test_vsadd_vv_i32m2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i32.i32( - , - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32: +define @test_vsadd_vx_i32m2_m( %mask, %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = 
call @llvm.riscv.vsadd.nxv4i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i32.i32.i32( poison, %op1, i32 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i32.i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32: +define @test_vsadd_vv_i32m4_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i32.i32( - , - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32: +define @test_vsadd_vx_i32m4_m( %mask, %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i32.i32.i32( poison, %op1, i32 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i32.i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32: +define @test_vsadd_vv_i32m8_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i32.i32( - , - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32: +define @test_vsadd_vx_i32m8_m( %mask, %op1, i32 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = tail call 
@llvm.riscv.vsadd.mask.nxv16i32.i32.i32( poison, %op1, i32 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i32.i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32: +define @test_vsadd_vv_i64m1_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i64.i64( - , - , - i64, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +define @test_vsadd_vx_i64m1_m( %mask, %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i64.i64.i32( poison, %op1, i64 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv1i64.i64.i32(, , i64, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64: +define @test_vsadd_vv_i64m2_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i64.i64( - , - , - i64, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64: +define @test_vsadd_vx_i64m2_m( %mask, %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m2_m: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i64.i64.i32( poison, %op1, i64 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i64.i64( - , - , - i64, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv2i64.i64.i32(, , i64, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64: +define @test_vsadd_vv_i64m4_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i64.i64( - , - , - i64, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64: +define @test_vsadd_vx_i64m4_m( %mask, %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i64.i64.i32( poison, %op1, i64 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i64.i64( - , - , - i64, - , - i32, - i32); +declare @llvm.riscv.vsadd.mask.nxv4i64.i64.i32(, , i64, , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64: +define @test_vsadd_vv_i64m8_m( %mask, %op1, %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 
+; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32( poison, %op1, %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i64.i64( - , - , - i64, - i32); +declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64: +define @test_vsadd_vx_i64m8_m( %mask, %op1, i64 %op2, i32 %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vsadd.mask.nxv8i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i64.i64.i32( poison, %op1, i64 %op2, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define 
@intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv16i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv32i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv64i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: 
vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv16i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} 
- -define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv32i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv16i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: 
vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vsadd.mask.nxv8i64.i64.i32(, , i64, , i32 immarg, i32, i32 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -1,2800 +1,1235 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( - , - , - , - i64); -define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vsadd_vv_i8mf8( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vsadd_vx_i8mf8( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv1i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vsadd_vv_i8mf4( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vsadd.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vsadd_vx_i8mf4( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv2i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vsadd_vv_i8mf2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vsadd_vx_i8mf2( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv4i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vsadd_vv_i8m1( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, 
%3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vsadd_vx_i8m1( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv8i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vsadd_vv_i8m2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vsadd_vx_i8m2( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv16i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vsadd_vv_i8m4( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vsadd_vx_i8m4( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, 
a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv32i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vsadd_vv_i8m8( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vsadd_vx_i8m8( %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv64i8.i8.i64( poison, %op1, i8 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv64i8.i8.i64(, , i8, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vsadd_vv_i16mf4( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vsadd_vx_i16mf4( %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i16.i16.i64( poison, %op1, i16 %op2, i64 0, i64 
%vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv1i16.i16.i64(, , i16, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vsadd_vv_i16mf2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vsadd_vx_i16mf2( %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i16.i16.i64( poison, %op1, i16 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv2i16.i16.i64(, , i16, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vsadd_vv_i16m1( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vsadd_vx_i16m1( %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i16.i16.i64( poison, %op1, i16 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv4i16.i16.i64(, , i16, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16: +define 
@test_vsadd_vv_i16m2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vsadd_vx_i16m2( %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i16.i16.i64( poison, %op1, i16 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv8i16.i16.i64(, , i16, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vsadd_vv_i16m4( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vsadd_vx_i16m4( %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i16.i16.i64( poison, %op1, i16 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv16i16.i16.i64(, , i16, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vsadd_vv_i16m8( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vsadd.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vsadd_vx_i16m8( %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv32i16.i16.i64( poison, %op1, i16 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv32i16.i16.i64(, , i16, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vsadd_vv_i32mf2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vsadd_vx_i32mf2( %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i32.i32.i64( poison, %op1, i32 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv1i32.i32.i64(, , i32, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vsadd_vv_i32m1( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); 
+declare @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vsadd_vx_i32m1( %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i32.i32.i64( poison, %op1, i32 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv2i32.i32.i64(, , i32, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vsadd_vv_i32m2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vsadd_vx_i32m2( %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i32.i32.i64( poison, %op1, i32 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv4i32.i32.i64(, , i32, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vsadd_vv_i32m4( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vsadd_vx_i32m4( %op1, i32 noundef signext %op2, i64 noundef %vl) { +; 
CHECK-LABEL: test_vsadd_vx_i32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i32.i32.i64( poison, %op1, i32 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv8i32.i32.i64(, , i32, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vsadd_vv_i32m8( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vsadd_vx_i32m8( %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv16i32.i32.i64( poison, %op1, i32 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv16i32.i32.i64(, , i32, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vsadd_vv_i64m1( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vsadd_vx_i64m1( %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; 
CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv1i64.i64.i64( poison, %op1, i64 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv1i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vsadd_vv_i64m2( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vsadd_vx_i64m2( %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv2i64.i64.i64( poison, %op1, i64 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( - , - , - , - i64); +declare @llvm.riscv.vsadd.nxv2i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vsadd_vv_i64m4( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vsadd_vx_i64m4( %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv4i64.i64.i64( poison, %op1, i64 %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( - , - , - , - i64); +declare 
@llvm.riscv.vsadd.nxv4i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vsadd_vv_i64m8( %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( poison, %op1, %op2, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) -define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vsadd_vx_i64m8( %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) + %0 = tail call @llvm.riscv.vsadd.nxv8i64.i64.i64( poison, %op1, i64 %op2, i64 0, i64 %vl) + ret %0 +} + +declare @llvm.riscv.vsadd.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - ret %a +define @test_vsadd_vv_i8mf8_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf8_m: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8: +define @test_vsadd_vx_i8mf8_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8: +define @test_vsadd_vv_i8mf4_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vsadd.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8: +define @test_vsadd_vx_i8mf4_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8: +define @test_vsadd_vv_i8mf2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8: +define @test_vsadd_vx_i8mf2_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8: +define @test_vsadd_vv_i8m1_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call 
@llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8: +define @test_vsadd_vx_i8m1_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8: +define @test_vsadd_vv_i8m2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8: +define @test_vsadd_vx_i8m2_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv16i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8: +define @test_vsadd_vv_i8m4_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare 
@llvm.riscv.vsadd.nxv32i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8: +define @test_vsadd_vx_i8m4_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv32i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8: +define @test_vsadd_vv_i8m8_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv64i8.i8( - , - , - i8, - i64); +declare @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8: +define @test_vsadd_vx_i8m8_m( %mask, %op1, i8 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i8m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, %op1, i8 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv64i8.i8.i64(, , i8, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8: +define @test_vsadd_vv_i16mf4_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i16.i16( - , - , - i16, - i64); +declare 
@llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16: +define @test_vsadd_vx_i16mf4_m( %mask, %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, %op1, i16 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i16.i16.i64(, , i16, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16: +define @test_vsadd_vv_i16mf2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i16.i16( - , - , - i16, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16: +define @test_vsadd_vx_i16mf2_m( %mask, %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, %op1, i16 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i16.i16.i64(, , i16, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16: +define @test_vsadd_vv_i16m1_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i16.i16( - , - , - i16, - i64); +declare 
@llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16: +define @test_vsadd_vx_i16m1_m( %mask, %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, %op1, i16 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i16.i16.i64(, , i16, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16: +define @test_vsadd_vv_i16m2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i16.i16( - , - , - i16, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16: +define @test_vsadd_vx_i16m2_m( %mask, %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, %op1, i16 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i16.i16.i64(, , i16, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16: +define @test_vsadd_vv_i16m4_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i16.i16( - , - , - i16, - i64); +declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64(, , 
, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16: +define @test_vsadd_vx_i16m4_m( %mask, %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, %op1, i16 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv16i16.i16.i64(, , i16, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16: +define @test_vsadd_vv_i16m8_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv32i16.i16( - , - , - i16, - i64); +declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16: +define @test_vsadd_vx_i16m8_m( %mask, %op1, i16 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i16m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, %op1, i16 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv32i16.i16.i64(, , i16, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16: +define @test_vsadd_vv_i32mf2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i32.i32( - , - , - i32, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, 
i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32: +define @test_vsadd_vx_i32mf2_m( %mask, %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, %op1, i32 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(, , i32, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32: +define @test_vsadd_vv_i32m1_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i32.i32( - , - , - i32, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32: +define @test_vsadd_vx_i32m1_m( %mask, %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, %op1, i32 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i32.i32.i64(, , i32, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32: +define @test_vsadd_vv_i32m2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i32.i32( - , - , - i32, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) -define 
@intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32: +define @test_vsadd_vx_i32m2_m( %mask, %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, %op1, i32 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i32.i32.i64(, , i32, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32: +define @test_vsadd_vv_i32m4_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i32.i32( - , - , - i32, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32: +define @test_vsadd_vx_i32m4_m( %mask, %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, %op1, i32 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i32.i32.i64(, , i32, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32: +define @test_vsadd_vv_i32m8_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv16i32.i32( - , - , - i32, - i64); +declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( 
%0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32: +define @test_vsadd_vx_i32m8_m( %mask, %op1, i32 noundef signext %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i32m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, %op1, i32 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv16i32.i32.i64(, , i32, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32: +define @test_vsadd_vv_i64m1_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv1i64.i64( - , - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +define @test_vsadd_vx_i64m1_m( %mask, %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, %op1, i64 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64: +define @test_vsadd_vv_i64m2_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv2i64.i64( - , - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64: +define @test_vsadd_vx_i64m2_m( %mask, %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, %op1, i64 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64: +define @test_vsadd_vv_i64m4_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv4i64.i64( - , - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64: +define @test_vsadd_vx_i64m4_m( %mask, %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, %op1, i64 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64: +define @test_vsadd_vv_i64m8_m( %mask, %op1, %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vv_i64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, %op1, %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vsadd.nxv8i64.i64( - , - , - i64, - i64); +declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64: +define 
@test_vsadd_vx_i64m8_m( %mask, %op1, i64 noundef %op2, i64 noundef %vl) { +; CHECK-LABEL: test_vsadd_vx_i64m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vsadd.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vsadd.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vsadd.nxv1i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a + %0 = tail call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, %op1, i64 %op2, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i8.i8( - undef, - %0, - 
i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv16i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv32i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv64i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv16i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv32i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv16i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define 
@intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv1i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv2i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv4i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vsadd.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.nxv8i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} +declare @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -5,12 +5,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -18,7 +19,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -28,13 +29,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -52,12 +53,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -65,7 +67,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -75,13 +77,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -99,12 +101,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -112,7 +115,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -122,13 +125,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -146,12 +149,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -159,7 +163,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -169,13 +173,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 
; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -193,12 +197,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -206,7 +211,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -216,13 +221,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -240,12 +245,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -253,7 +259,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -263,13 +269,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -287,12 +293,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +307,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -310,14 +317,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -326,7 +333,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -358,13 +366,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -373,7 +381,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -395,7 +404,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -405,13 +414,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -420,7 +429,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -429,12 +438,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -442,7 +452,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -452,13 +462,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -467,7 +477,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -476,12 +486,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -489,7 +500,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -499,13 +510,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -514,7 +525,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -523,12 +534,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -546,13 +558,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -561,7 +573,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -570,12 +582,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -593,14 +606,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -609,7 +622,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -618,12 +631,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +645,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -641,13 +655,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +670,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -665,12 +679,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -678,7 +693,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -688,13 +703,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +718,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -712,12 +727,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -725,7 +741,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -735,13 +751,13 @@ , , , - i32, - i32); + i32, i32, i32); define 
@intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -750,7 +766,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -759,12 +775,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -772,7 +789,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -782,13 +799,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +814,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -806,12 +823,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -819,7 +837,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -829,14 +847,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -845,7 +863,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -854,12 +872,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -867,7 +886,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -877,13 +896,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +911,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -901,12 +920,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -914,7 +934,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 
%2) ret %a } @@ -924,13 +944,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +959,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -948,12 +968,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -961,7 +982,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -971,13 +992,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1007,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -995,12 +1016,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1030,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1018,14 +1040,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1056,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1043,12 +1065,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1079,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1066,13 +1089,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1081,7 +1104,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1090,12 +1113,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret 
entry: @@ -1103,7 +1127,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1113,13 +1137,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1128,7 +1152,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1137,12 +1161,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1150,7 +1175,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1160,13 +1185,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1175,7 +1200,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1184,12 +1209,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1197,7 +1223,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1207,13 +1233,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1222,7 +1248,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1231,12 +1257,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1244,7 +1271,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1254,13 +1281,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1269,7 +1296,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1278,12 +1305,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 
; CHECK-NEXT: ret entry: @@ -1291,7 +1319,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1301,13 +1329,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1344,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1325,12 +1353,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1338,7 +1367,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1348,13 +1377,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1392,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1372,12 +1401,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1385,7 +1415,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1395,13 +1425,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1410,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1419,12 +1449,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1432,7 +1463,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1442,13 +1473,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1457,7 +1488,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1466,12 +1497,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1479,7 +1511,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1489,13 +1521,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1504,7 +1536,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1513,12 +1545,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1526,7 +1559,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1536,13 +1569,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1551,7 +1584,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1560,12 +1593,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1573,7 +1607,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1583,13 +1617,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1598,7 +1632,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1607,12 +1641,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1620,7 +1655,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1630,13 +1665,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1645,7 +1680,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1653,13 +1688,13 @@ declare @llvm.riscv.vsaddu.nxv1i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1667,7 +1702,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1677,13 +1712,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1727,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1700,13 +1735,13 @@ declare @llvm.riscv.vsaddu.nxv2i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1714,7 +1749,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1724,13 +1759,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1739,7 +1774,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1747,13 +1782,13 @@ declare @llvm.riscv.vsaddu.nxv4i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1761,7 +1796,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1771,13 +1806,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1821,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1794,13 +1829,13 @@ declare @llvm.riscv.vsaddu.nxv8i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1808,7 +1843,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1818,13 +1853,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, 
v0.t ; CHECK-NEXT: ret entry: @@ -1833,7 +1868,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1841,13 +1876,13 @@ declare @llvm.riscv.vsaddu.nxv16i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1855,7 +1890,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1865,13 +1900,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1880,7 +1915,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1889,7 +1924,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64: @@ -1900,6 +1935,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1908,7 +1944,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1918,8 +1954,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64: @@ -1930,6 +1965,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1939,7 +1975,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1948,7 +1984,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64: @@ -1959,6 +1995,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1967,7 +2004,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1977,8 +2014,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64: @@ -1989,6 +2025,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1998,7 +2035,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2007,7 +2044,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64: @@ -2018,6 +2055,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v 
v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2026,7 +2064,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2036,8 +2074,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64: @@ -2048,6 +2085,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2057,7 +2095,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2066,7 +2104,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64: @@ -2077,6 +2115,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2085,7 +2124,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2095,8 +2134,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64: @@ -2107,6 +2145,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2116,7 +2155,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2125,6 +2164,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2132,7 +2172,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2141,6 +2181,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2149,7 +2190,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2158,6 +2199,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2165,7 +2207,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2174,6 +2216,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2182,7 +2225,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2191,6 +2234,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2198,7 +2242,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2207,6 +2251,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2215,7 +2260,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2224,6 +2269,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2231,7 +2277,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2240,6 +2286,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2248,7 +2295,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2257,6 +2304,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2264,7 +2312,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2273,6 +2321,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2281,7 +2330,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2290,6 +2339,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2297,7 +2347,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2306,6 +2356,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2314,7 +2365,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2323,6 +2374,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2330,7 +2382,7 @@ undef, %0, i8 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2339,6 +2391,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2347,7 +2400,7 @@ %1, i8 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2356,6 +2409,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2363,7 +2417,7 @@ undef, %0, i16 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2372,6 +2426,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2380,7 +2435,7 @@ %1, i16 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2389,6 +2444,7 @@ ; CHECK-LABEL: 
intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2396,7 +2452,7 @@ undef, %0, i16 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2405,6 +2461,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2413,7 +2470,7 @@ %1, i16 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2422,6 +2479,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2429,7 +2487,7 @@ undef, %0, i16 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2438,6 +2496,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2446,7 +2505,7 @@ %1, i16 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2455,6 +2514,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2462,7 +2522,7 @@ undef, %0, i16 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2471,6 +2531,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2479,7 +2540,7 @@ %1, i16 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2488,6 +2549,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2495,7 +2557,7 @@ undef, %0, i16 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2504,6 +2566,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2575,7 @@ %1, i16 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2521,6 +2584,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2528,7 +2592,7 @@ undef, %0, i16 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2537,6 +2601,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2545,7 +2610,7 @@ %1, i16 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2554,6 +2619,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2561,7 +2627,7 @@ undef, %0, i32 9, - i32 %1) + i32 
0, i32 %1) ret %a } @@ -2570,6 +2636,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2578,7 +2645,7 @@ %1, i32 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2587,6 +2654,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2594,7 +2662,7 @@ undef, %0, i32 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2603,6 +2671,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2611,7 +2680,7 @@ %1, i32 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2620,6 +2689,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2627,7 +2697,7 @@ undef, %0, i32 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2636,6 +2706,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2644,7 +2715,7 @@ %1, i32 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2653,6 +2724,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2660,7 +2732,7 @@ undef, %0, i32 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2669,6 +2741,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2677,7 +2750,7 @@ %1, i32 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2686,6 +2759,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2693,7 +2767,7 @@ undef, %0, i32 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2702,6 +2776,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2710,7 +2785,7 @@ %1, i32 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2719,6 +2794,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2726,7 +2802,7 @@ undef, %0, i64 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2735,6 +2811,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret 
entry: @@ -2743,7 +2820,7 @@ %1, i64 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2752,6 +2829,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2759,7 +2837,7 @@ undef, %0, i64 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2768,6 +2846,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2776,7 +2855,7 @@ %1, i64 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2785,6 +2864,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2792,7 +2872,7 @@ undef, %0, i64 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2801,6 +2881,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2809,7 +2890,7 @@ %1, i64 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } @@ -2818,6 +2899,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2825,7 +2907,7 @@ undef, %0, i64 9, - i32 %1) + i32 0, i32 %1) ret %a } @@ -2834,6 +2916,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2842,7 +2925,7 @@ %1, i64 9, %2, - i32 %3, i32 1) + i32 0, i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -5,12 +5,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -18,7 +19,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -28,13 +29,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -52,12 +53,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -65,7 +67,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ 
-75,13 +77,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -99,12 +101,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -112,7 +115,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -122,13 +125,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -146,12 +149,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -159,7 +163,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -169,13 +173,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -193,12 +197,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -206,7 +211,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -216,13 +221,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -240,12 +245,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -253,7 +259,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -263,13 
+269,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -287,12 +293,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +307,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -310,14 +317,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -326,7 +333,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -358,13 +366,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -373,7 +381,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -395,7 +404,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -405,13 +414,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -420,7 +429,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -429,12 +438,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -442,7 +452,7 
@@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -452,13 +462,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -467,7 +477,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -476,12 +486,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -489,7 +500,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -499,13 +510,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -514,7 +525,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -523,12 +534,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -546,13 +558,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -561,7 +573,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -570,12 +582,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -593,14 +606,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -609,7 +622,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -618,12 +631,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; 
CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +645,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -641,13 +655,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +670,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -665,12 +679,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -678,7 +693,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -688,13 +703,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +718,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -712,12 +727,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -725,7 +741,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -735,13 +751,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -750,7 +766,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -759,12 +775,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -772,7 +789,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -782,13 +799,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +814,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -806,12 +823,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -819,7 +837,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -829,14 +847,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -845,7 +863,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -854,12 +872,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -867,7 +886,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -877,13 +896,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +911,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -901,12 +920,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -914,7 +934,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -924,13 +944,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +959,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -948,12 +968,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -961,7 +982,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -971,13 +992,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1007,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -995,12 +1016,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1030,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1018,14 +1040,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1056,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1043,12 +1065,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1079,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1066,13 +1089,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1081,7 +1104,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1090,12 +1113,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1127,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1113,13 +1137,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1128,7 +1152,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1137,12 +1161,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1150,7 +1175,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1160,13 +1185,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1175,7 +1200,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1184,12 +1209,13 @@ , , i8, - i64); + i64, i64); define 
@intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1197,7 +1223,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1207,13 +1233,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1222,7 +1248,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1231,12 +1257,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1244,7 +1271,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1254,13 +1281,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1269,7 +1296,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1278,12 +1305,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1291,7 +1319,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1301,13 +1329,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1344,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1325,12 +1353,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1338,7 +1367,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1348,13 +1377,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1392,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1372,12 +1401,13 @@ , , 
i16, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1385,7 +1415,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1395,13 +1425,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1410,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1419,12 +1449,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1432,7 +1463,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1442,13 +1473,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1457,7 +1488,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1466,12 +1497,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1479,7 +1511,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1489,13 +1521,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1504,7 +1536,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1513,12 +1545,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1526,7 +1559,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1536,13 +1569,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1551,7 +1584,7 @@ %1, i16 
%2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1560,12 +1593,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1573,7 +1607,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1583,13 +1617,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1598,7 +1632,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1607,12 +1641,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1620,7 +1655,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1630,13 +1665,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1645,7 +1680,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1654,12 +1689,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1667,7 +1703,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1677,13 +1713,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1728,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1701,12 +1737,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1714,7 +1751,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1724,13 +1761,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi 
vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1739,7 +1776,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1748,12 +1785,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1761,7 +1799,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1771,13 +1809,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1824,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1795,12 +1833,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1808,7 +1847,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1818,13 +1857,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1833,7 +1872,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1842,12 +1881,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1855,7 +1895,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1865,13 +1905,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1880,7 +1920,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1888,13 +1928,13 @@ declare @llvm.riscv.vsaddu.nxv1i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1902,7 +1942,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1912,13 +1952,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1927,7 +1967,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1935,13 +1975,13 @@ declare @llvm.riscv.vsaddu.nxv2i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1949,7 +1989,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1959,13 +1999,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1974,7 +2014,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1982,13 +2022,13 @@ declare @llvm.riscv.vsaddu.nxv4i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1996,7 +2036,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2006,13 +2046,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2021,7 +2061,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -2029,13 +2069,13 @@ declare @llvm.riscv.vsaddu.nxv8i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2043,7 +2083,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2053,13 +2093,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2068,7 +2108,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -2077,6 +2117,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2084,7 +2125,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2093,6 +2134,7 @@ ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2101,7 +2143,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2110,6 +2152,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2117,7 +2160,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2126,6 +2169,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2134,7 +2178,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2143,6 +2187,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2150,7 +2195,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2159,6 +2204,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2167,7 +2213,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2176,6 +2222,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2183,7 +2230,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2192,6 +2239,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2200,7 +2248,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2209,6 +2257,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2216,7 +2265,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2225,6 +2274,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2233,7 +2283,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2242,6 +2292,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2249,7 +2300,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2258,6 +2309,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2266,7 +2318,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2275,6 
+2327,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2282,7 +2335,7 @@ undef, %0, i8 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2291,6 +2344,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2299,7 +2353,7 @@ %1, i8 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2308,6 +2362,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2315,7 +2370,7 @@ undef, %0, i16 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2324,6 +2379,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2332,7 +2388,7 @@ %1, i16 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2341,6 +2397,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2348,7 +2405,7 @@ undef, %0, i16 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2357,6 +2414,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2365,7 +2423,7 @@ %1, i16 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2374,6 +2432,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2440,7 @@ undef, %0, i16 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2390,6 +2449,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2398,7 +2458,7 @@ %1, i16 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2407,6 +2467,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2414,7 +2475,7 @@ undef, %0, i16 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2423,6 +2484,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2431,7 +2493,7 @@ %1, i16 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2440,6 +2502,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2447,7 +2510,7 @@ undef, %0, i16 9, - 
i64 %1) + i64 0, i64 %1) ret %a } @@ -2456,6 +2519,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2464,7 +2528,7 @@ %1, i16 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2473,6 +2537,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2480,7 +2545,7 @@ undef, %0, i16 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2489,6 +2554,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2497,7 +2563,7 @@ %1, i16 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2506,6 +2572,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2513,7 +2580,7 @@ undef, %0, i32 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2522,6 +2589,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2530,7 +2598,7 @@ %1, i32 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2539,6 +2607,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2546,7 +2615,7 @@ undef, %0, i32 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2555,6 +2624,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2563,7 +2633,7 @@ %1, i32 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2572,6 +2642,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2579,7 +2650,7 @@ undef, %0, i32 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2588,6 +2659,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2668,7 @@ %1, i32 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2605,6 +2677,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2612,7 +2685,7 @@ undef, %0, i32 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2621,6 +2694,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; 
CHECK-NEXT: ret entry: @@ -2629,7 +2703,7 @@ %1, i32 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2638,6 +2712,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2645,7 +2720,7 @@ undef, %0, i32 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2654,6 +2729,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2662,7 +2738,7 @@ %1, i32 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2671,6 +2747,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2678,7 +2755,7 @@ undef, %0, i64 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2687,6 +2764,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2695,7 +2773,7 @@ %1, i64 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2704,6 +2782,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2711,7 +2790,7 @@ undef, %0, i64 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2720,6 +2799,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2728,7 +2808,7 @@ %1, i64 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2737,6 +2817,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2744,7 +2825,7 @@ undef, %0, i64 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2753,6 +2834,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2761,7 +2843,7 @@ %1, i64 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } @@ -2770,6 +2852,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2777,7 +2860,7 @@ undef, %0, i64 9, - i64 %1) + i64 0, i64 %1) ret %a } @@ -2786,6 +2869,7 @@ ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2794,7 +2878,7 @@ %1, i64 9, %2, - i64 %3, i64 1) + i64 0, i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -5,12 +5,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -18,7 +19,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -28,13 +29,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -52,12 +53,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -65,7 +67,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -75,13 +77,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -99,12 +101,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -112,7 +115,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -122,13 +125,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -146,12 +149,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -159,7 +163,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -169,13 +173,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -193,12 +197,13 
@@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -206,7 +211,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -216,13 +221,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -240,12 +245,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -253,7 +259,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -263,13 +269,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -287,12 +293,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +307,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -310,14 +317,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -326,7 +333,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -358,13 +366,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -373,7 +381,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 
%4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -395,7 +404,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -405,13 +414,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -420,7 +429,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -429,12 +438,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -442,7 +452,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -452,13 +462,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -467,7 +477,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -476,12 +486,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -489,7 +500,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -499,13 +510,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -514,7 +525,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -523,12 +534,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -546,13 +558,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -561,7 +573,7 @@ %1, %2, %3, - i32 
%4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -570,12 +582,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -593,14 +606,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -609,7 +622,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -618,12 +631,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +645,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -641,13 +655,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +670,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -665,12 +679,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -678,7 +693,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -688,13 +703,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +718,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -712,12 +727,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -725,7 +741,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -735,13 +751,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; 
CHECK-NEXT: ret entry: @@ -750,7 +766,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -759,12 +775,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -772,7 +789,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -782,13 +799,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +814,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -806,12 +823,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -819,7 +837,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -829,14 +847,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -845,7 +863,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -854,12 +872,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -867,7 +886,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -877,13 +896,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +911,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -901,12 +920,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -914,7 +934,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -924,13 +944,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: 
csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +959,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -948,12 +968,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -961,7 +982,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -971,13 +992,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1007,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -995,12 +1016,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1030,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1018,14 +1040,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1056,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1043,12 +1065,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1079,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1066,13 +1089,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1081,7 +1104,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1090,12 +1113,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1127,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1113,13 +1137,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1128,7 +1152,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1137,12 +1161,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1150,7 +1175,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1160,13 +1185,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1175,7 +1200,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1184,12 +1209,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1197,7 +1223,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1207,13 +1233,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1222,7 +1248,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1231,12 +1257,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1244,7 +1271,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1254,13 +1281,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1269,7 +1296,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1278,12 +1305,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1291,7 +1319,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1301,13 +1329,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1344,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1325,12 +1353,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1338,7 +1367,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1348,13 +1377,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1392,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1372,12 +1401,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1385,7 +1415,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1395,13 +1425,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1410,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1419,12 +1449,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1432,7 +1463,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1442,13 +1473,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1457,7 +1488,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1466,12 +1497,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1479,7 +1511,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1489,13 +1521,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16: 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1504,7 +1536,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1513,12 +1545,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1526,7 +1559,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1536,13 +1569,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1551,7 +1584,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1560,12 +1593,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1573,7 +1607,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1583,13 +1617,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1598,7 +1632,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1607,12 +1641,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1620,7 +1655,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1630,13 +1665,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1645,7 +1680,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1653,13 +1688,13 @@ declare @llvm.riscv.vssub.nxv1i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1667,7 +1702,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1677,13 +1712,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define 
@intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1727,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1700,13 +1735,13 @@ declare @llvm.riscv.vssub.nxv2i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1714,7 +1749,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1724,13 +1759,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1739,7 +1774,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1747,13 +1782,13 @@ declare @llvm.riscv.vssub.nxv4i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1761,7 +1796,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1771,13 +1806,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1821,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1794,13 +1829,13 @@ declare @llvm.riscv.vssub.nxv8i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1808,7 +1843,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1818,13 +1853,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1833,7 +1868,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1841,13 +1876,13 @@ declare @llvm.riscv.vssub.nxv16i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, 
m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1855,7 +1890,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1865,13 +1900,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1880,7 +1915,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1889,7 +1924,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: @@ -1900,6 +1935,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1908,7 +1944,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1918,8 +1954,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64: @@ -1930,6 +1965,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1939,7 +1975,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1948,7 +1984,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64: @@ -1959,6 +1995,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1967,7 +2004,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1977,8 +2014,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64: @@ -1989,6 +2025,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1998,7 +2035,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2007,7 +2044,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64: @@ -2018,6 +2055,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2026,7 +2064,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2036,8 +2074,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64: @@ -2048,6 +2085,7 @@ ; 
CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2057,7 +2095,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2066,7 +2104,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64: @@ -2077,6 +2115,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2085,7 +2124,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2095,8 +2134,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64: @@ -2107,6 +2145,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2116,7 +2155,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -5,12 +5,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -18,7 +19,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -28,13 +29,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -52,12 +53,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -65,7 +67,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -75,13 +77,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -99,12 +101,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -112,7 +115,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -122,13 +125,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -146,12 +149,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -159,7 +163,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -169,13 +173,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -193,12 +197,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -206,7 +211,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -216,13 +221,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -240,12 +245,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -253,7 +259,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -263,13 +269,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -287,12 +293,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +307,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -310,14 +317,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -326,7 +333,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -358,13 +366,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -373,7 +381,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -395,7 +404,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -405,13 +414,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -420,7 +429,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -429,12 +438,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -442,7 +452,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -452,13 +462,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -467,7 +477,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -476,12 +486,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -489,7 +500,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -499,13 +510,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -514,7 +525,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -523,12 +534,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -546,13 +558,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -561,7 +573,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -570,12 +582,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -593,14 +606,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -609,7 +622,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -618,12 +631,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +645,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -641,13 +655,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +670,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -665,12 +679,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -678,7 +693,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -688,13 +703,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +718,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -712,12 +727,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -725,7 +741,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -735,13 +751,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -750,7 +766,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -759,12 +775,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -772,7 +789,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -782,13 +799,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +814,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -806,12 +823,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -819,7 +837,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -829,14 +847,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -845,7 +863,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -854,12 +872,13 @@ , , , - i64); + i64, i64); define 
@intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -867,7 +886,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -877,13 +896,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +911,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -901,12 +920,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -914,7 +934,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -924,13 +944,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +959,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -948,12 +968,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -961,7 +982,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -971,13 +992,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1007,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -995,12 +1016,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1030,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1018,14 +1040,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1056,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ 
-1043,12 +1065,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1079,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1066,13 +1089,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1081,7 +1104,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1090,12 +1113,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1127,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1113,13 +1137,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1128,7 +1152,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1137,12 +1161,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1150,7 +1175,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1160,13 +1185,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1175,7 +1200,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1184,12 +1209,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1197,7 +1223,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1207,13 +1233,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1222,7 +1248,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1231,12 
+1257,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1244,7 +1271,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1254,13 +1281,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1269,7 +1296,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1278,12 +1305,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1291,7 +1319,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1301,13 +1329,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1344,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1325,12 +1353,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1338,7 +1367,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1348,13 +1377,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1392,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1372,12 +1401,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1385,7 +1415,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1395,13 +1425,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1410,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, 
i64 1) ret %a } @@ -1419,12 +1449,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1432,7 +1463,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1442,13 +1473,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1457,7 +1488,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1466,12 +1497,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1479,7 +1511,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1489,13 +1521,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1504,7 +1536,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1513,12 +1545,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1526,7 +1559,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1536,13 +1569,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1551,7 +1584,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1560,12 +1593,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1573,7 +1607,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1583,13 +1617,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ 
-1598,7 +1632,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1607,12 +1641,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1620,7 +1655,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1630,13 +1665,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1645,7 +1680,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1654,12 +1689,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1667,7 +1703,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1677,13 +1713,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1728,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1701,12 +1737,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1714,7 +1751,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1724,13 +1761,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1739,7 +1776,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1748,12 +1785,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1761,7 +1799,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1771,13 +1809,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 
; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1824,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1795,12 +1833,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1808,7 +1847,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1818,13 +1857,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1833,7 +1872,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1842,12 +1881,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1855,7 +1895,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1865,13 +1905,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1880,7 +1920,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1888,13 +1928,13 @@ declare @llvm.riscv.vssub.nxv1i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1902,7 +1942,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1912,13 +1952,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1927,7 +1967,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1935,13 +1975,13 @@ declare @llvm.riscv.vssub.nxv2i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1949,7 +1989,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1959,13 +1999,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1974,7 +2014,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1982,13 +2022,13 @@ declare @llvm.riscv.vssub.nxv4i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1996,7 +2036,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2006,13 +2046,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2021,7 +2061,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -2029,13 +2069,13 @@ declare @llvm.riscv.vssub.nxv8i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2043,7 +2083,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2053,13 +2093,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2068,7 +2108,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -5,12 +5,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -18,7 +19,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -28,13 +29,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -52,12 +53,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; 
CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -65,7 +67,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -75,13 +77,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -99,12 +101,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -112,7 +115,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -122,13 +125,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -146,12 +149,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -159,7 +163,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -169,13 +173,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -193,12 +197,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -206,7 +211,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -216,13 +221,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -240,12 +245,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; 
CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -253,7 +259,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -263,13 +269,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -287,12 +293,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +307,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -310,14 +317,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -326,7 +333,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -358,13 +366,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -373,7 +381,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -395,7 +404,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -405,13 +414,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -420,7 +429,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -429,12 +438,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -442,7 +452,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -452,13 +462,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -467,7 +477,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -476,12 +486,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -489,7 +500,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -499,13 +510,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -514,7 +525,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -523,12 +534,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -546,13 +558,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -561,7 +573,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -570,12 +582,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -593,14 +606,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -609,7 +622,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -618,12 +631,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +645,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -641,13 +655,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +670,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -665,12 +679,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -678,7 +693,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -688,13 +703,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +718,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -712,12 +727,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -725,7 +741,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -735,13 +751,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -750,7 +766,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -759,12 +775,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -772,7 +789,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -782,13 +799,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +814,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -806,12 +823,13 @@ , , , - i32); + i32, i32); define 
@intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -819,7 +837,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -829,14 +847,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -845,7 +863,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -854,12 +872,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -867,7 +886,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -877,13 +896,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +911,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -901,12 +920,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -914,7 +934,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -924,13 +944,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +959,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -948,12 +968,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -961,7 +982,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -971,13 +992,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1007,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 
0, i32 %4, i32 1) ret %a } @@ -995,12 +1016,13 @@ , , , - i32); + i32, i32); define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1030,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1018,14 +1040,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1056,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1043,12 +1065,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1079,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1066,13 +1089,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1081,7 +1104,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1090,12 +1113,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1127,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1113,13 +1137,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1128,7 +1152,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1137,12 +1161,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1150,7 +1175,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1160,13 +1185,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ 
-1175,7 +1200,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1184,12 +1209,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1197,7 +1223,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1207,13 +1233,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1222,7 +1248,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1231,12 +1257,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1244,7 +1271,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1254,13 +1281,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1269,7 +1296,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1278,12 +1305,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1291,7 +1319,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1301,13 +1329,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1344,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1325,12 +1353,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1338,7 +1367,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1348,13 +1377,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t 
; CHECK-NEXT: ret entry: @@ -1363,7 +1392,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1372,12 +1401,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1385,7 +1415,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1395,13 +1425,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1410,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1419,12 +1449,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1432,7 +1463,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1442,13 +1473,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1457,7 +1488,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1466,12 +1497,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1479,7 +1511,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1489,13 +1521,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1504,7 +1536,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1513,12 +1545,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1526,7 +1559,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1536,13 +1569,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1551,7 +1584,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1560,12 +1593,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1573,7 +1607,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1583,13 +1617,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1598,7 +1632,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1607,12 +1641,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1620,7 +1655,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1630,13 +1665,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1645,7 +1680,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1653,13 +1688,13 @@ declare @llvm.riscv.vssubu.nxv1i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1667,7 +1702,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1677,13 +1712,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1727,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1700,13 +1735,13 @@ declare @llvm.riscv.vssubu.nxv2i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1714,7 +1749,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1724,13 +1759,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define 
@intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1739,7 +1774,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1747,13 +1782,13 @@ declare @llvm.riscv.vssubu.nxv4i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1761,7 +1796,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1771,13 +1806,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1821,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1794,13 +1829,13 @@ declare @llvm.riscv.vssubu.nxv8i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1808,7 +1843,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1818,13 +1853,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1833,7 +1868,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1841,13 +1876,13 @@ declare @llvm.riscv.vssubu.nxv16i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1855,7 +1890,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1865,13 +1900,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1880,7 +1915,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1889,7 +1924,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: @@ -1900,6 +1935,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, 
a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1908,7 +1944,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1918,8 +1954,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64: @@ -1930,6 +1965,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1939,7 +1975,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1948,7 +1984,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64: @@ -1959,6 +1995,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1967,7 +2004,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1977,8 +2014,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64: @@ -1989,6 +2025,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1998,7 +2035,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2007,7 +2044,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64: @@ -2018,6 +2055,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2026,7 +2064,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2036,8 +2074,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64: @@ -2048,6 +2085,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2057,7 +2095,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2066,7 +2104,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64: @@ -2077,6 +2115,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2085,7 +2124,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2095,8 +2134,7 @@ , i64, , - i32, - i32); + i32, i32, i32); 
define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64: @@ -2107,6 +2145,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2116,7 +2155,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -5,12 +5,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -18,7 +19,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -28,13 +29,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -52,12 +53,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -65,7 +67,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -75,13 +77,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -99,12 +101,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -112,7 +115,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -122,13 +125,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -146,12 +149,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -159,7 +163,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -169,13 +173,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -193,12 +197,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -206,7 +211,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -216,13 +221,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -240,12 +245,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -253,7 +259,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -263,13 +269,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -287,12 +293,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +307,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -310,14 +317,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -326,7 +333,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -358,13 +366,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -373,7 +381,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -395,7 +404,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -405,13 +414,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -420,7 +429,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -429,12 +438,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -442,7 +452,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -452,13 +462,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -467,7 +477,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -476,12 +486,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -489,7 +500,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -499,13 +510,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -514,7 +525,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -523,12 +534,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, 
%1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -546,13 +558,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -561,7 +573,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -570,12 +582,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -593,14 +606,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -609,7 +622,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -618,12 +631,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +645,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -641,13 +655,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +670,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -665,12 +679,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -678,7 +693,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -688,13 +703,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +718,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -712,12 
+727,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -725,7 +741,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -735,13 +751,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -750,7 +766,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -759,12 +775,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -772,7 +789,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -782,13 +799,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +814,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -806,12 +823,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -819,7 +837,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -829,14 +847,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -845,7 +863,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -854,12 +872,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -867,7 +886,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -877,13 +896,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 
+911,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -901,12 +920,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -914,7 +934,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -924,13 +944,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +959,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -948,12 +968,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -961,7 +982,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -971,13 +992,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1007,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -995,12 +1016,13 @@ , , , - i64); + i64, i64); define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1030,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1018,14 +1040,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1056,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1043,12 +1065,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1079,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1066,13 +1089,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; 
CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1081,7 +1104,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1090,12 +1113,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1127,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1113,13 +1137,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1128,7 +1152,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1137,12 +1161,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1150,7 +1175,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1160,13 +1185,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1175,7 +1200,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1184,12 +1209,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1197,7 +1223,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1207,13 +1233,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1222,7 +1248,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1231,12 +1257,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1244,7 +1271,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1254,13 +1281,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: 
csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1269,7 +1296,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1278,12 +1305,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1291,7 +1319,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1301,13 +1329,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1344,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1325,12 +1353,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1338,7 +1367,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1348,13 +1377,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1392,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1372,12 +1401,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1385,7 +1415,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1395,13 +1425,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1410,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1419,12 +1449,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1432,7 +1463,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1442,13 +1473,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1457,7 +1488,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1466,12 +1497,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1479,7 +1511,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1489,13 +1521,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1504,7 +1536,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1513,12 +1545,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1526,7 +1559,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1536,13 +1569,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1551,7 +1584,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1560,12 +1593,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1573,7 +1607,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1583,13 +1617,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1598,7 +1632,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1607,12 +1641,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1620,7 +1655,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1630,13 +1665,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1645,7 +1680,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1654,12 +1689,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1667,7 +1703,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1677,13 +1713,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1728,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1701,12 +1737,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1714,7 +1751,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1724,13 +1761,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1739,7 +1776,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1748,12 +1785,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1761,7 +1799,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1771,13 +1809,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1824,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1795,12 +1833,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1808,7 +1847,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1818,13 +1857,13 @@ , i32, , - i64, - i64); + i64, i64, 
i64); define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1833,7 +1872,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1842,12 +1881,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1855,7 +1895,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1865,13 +1905,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1880,7 +1920,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1888,13 +1928,13 @@ declare @llvm.riscv.vssubu.nxv1i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1902,7 +1942,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1912,13 +1952,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1927,7 +1967,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1935,13 +1975,13 @@ declare @llvm.riscv.vssubu.nxv2i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1949,7 +1989,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1959,13 +1999,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1974,7 +2014,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1982,13 +2022,13 @@ declare @llvm.riscv.vssubu.nxv4i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, 
ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1996,7 +2036,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2006,13 +2046,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2021,7 +2061,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -2029,13 +2069,13 @@ declare @llvm.riscv.vssubu.nxv8i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2043,7 +2083,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2053,13 +2093,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2068,7 +2108,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a }
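Illustrative sketch (not part of the patch): every hunk above applies the same mechanical change, so the minimal .ll function below shows the resulting call shape in one place. It assumes the operand order established by this diff for the vssubu intrinsics — passthru, the two operands, then a new XLen-typed rounding-mode operand immediately before VL — with the value 0 (rnu) matching the `csrwi vxrm, 0` the CHECK lines now expect; the function name @vssubu_sketch is made up for illustration, and the intrinsic mangling is modeled on the declarations in the tests above.

; Minimal sketch, assuming the post-patch intrinsic signature shown in the tests.
; Compile with: llc -mtriple=riscv64 -mattr=+v
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (undef here, as in the unmasked tests)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2
  i64,                 ; fixed-point rounding mode (the operand added by this change)
  i64)                 ; vl

define <vscale x 1 x i8> @vssubu_sketch(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl) {
entry:
  ; Rounding mode 0 selects rnu; per the CHECK lines above, codegen is expected
  ; to materialize it as `csrwi vxrm, 0` ahead of the vssubu.vv instruction.
  %r = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
          <vscale x 1 x i8> undef,
          <vscale x 1 x i8> %op1,
          <vscale x 1 x i8> %op2,
          i64 0,
          i64 %vl)
  ret <vscale x 1 x i8> %r
}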