diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -119,11 +119,21 @@
                          [["vv", "v", "vvUv"],
                           ["vx", "v", "vvz"]]>;
+multiclass RVVSignedShiftBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet;
+
 multiclass RVVUnsignedShiftBuiltinSet
     : RVVOutOp1BuiltinSet;
+multiclass RVVUnsignedShiftBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet;
+
 multiclass RVVShiftBuiltinSet
     : RVVSignedShiftBuiltinSet,
       RVVUnsignedShiftBuiltinSet;
@@ -133,10 +143,22 @@
       : RVVOutOp0Op1BuiltinSet;
+
+  multiclass RVVSignedNShiftBuiltinSetRoundingMode
+      : RVVOutOp0Op1BuiltinSet;
+
   multiclass RVVUnsignedNShiftBuiltinSet
       : RVVOutOp0Op1BuiltinSet;
+
+  multiclass RVVUnsignedNShiftBuiltinSetRoundingMode
+      : RVVOutOp0Op1BuiltinSet;
+
 }
 multiclass RVVCarryinBuiltinSet
@@ -1743,7 +1765,6 @@
 defm vssubu : RVVUnsignedBinBuiltinSet;
 defm vssub : RVVSignedBinBuiltinSet;

-// 13.2. Vector Single-Width Averaging Add and Subtract
 let ManualCodegen = [{
   {
     // LLVM intrinsic
@@ -1779,24 +1800,62 @@
     return Builder.CreateCall(F, Operands, "");
   }
 }] in {
+  // 13.2. Vector Single-Width Averaging Add and Subtract
   defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
   defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
   defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode;
   defm vasub : RVVSignedBinBuiltinSetRoundingMode;
-}
-// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-let RequiredFeatures = ["FullMultiply"] in {
-defm vsmul : RVVSignedBinBuiltinSet;
+  // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+  let RequiredFeatures = ["FullMultiply"] in {
+    defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
+  }
+
+  // 13.4. Vector Single-Width Scaling Shift Instructions
+  defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
+  defm vssra : RVVSignedShiftBuiltinSetRoundingMode;
 }
-// 13.4. Vector Single-Width Scaling Shift Instructions
-defm vssrl : RVVUnsignedShiftBuiltinSet;
-defm vssra : RVVSignedShiftBuiltinSet;
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, round_mode, vl)
+    // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)

-// 13.5. Vector Narrowing Fixed-Point Clip Instructions
-defm vnclipu : RVVUnsignedNShiftBuiltinSet;
-defm vnclip : RVVSignedNShiftBuiltinSet;
+    SmallVector Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]); // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    Operands.push_back(Ops[Offset + 2]); // vxrm
+    Operands.push_back(Ops[Offset + 3]); // vl
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+                      Ops.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  // 13.5. Vector Narrowing Fixed-Point Clip Instructions
+  defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
+  defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
+}
 // 14. Vector Floating-Point Instructions
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4702,6 +4702,16 @@
   case RISCVVector::BI__builtin_rvv_vasubu_vx_ta:
   case RISCVVector::BI__builtin_rvv_vasub_vv_ta:
   case RISCVVector::BI__builtin_rvv_vasub_vx_ta:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_ta:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_ta:
+  case RISCVVector::BI__builtin_rvv_vssra_vv_ta:
+  case RISCVVector::BI__builtin_rvv_vssra_vx_ta:
+  case RISCVVector::BI__builtin_rvv_vssrl_vv_ta:
+  case RISCVVector::BI__builtin_rvv_vssrl_vx_ta:
+  case RISCVVector::BI__builtin_rvv_vnclip_wv_ta:
+  case RISCVVector::BI__builtin_rvv_vnclip_wx_ta:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wv_ta:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wx_ta:
     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
   case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
   case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
@@ -4711,6 +4721,16 @@
   case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
   case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
   case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+  case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
   case RISCVVector::BI__builtin_rvv_vaaddu_vv_tama:
   case RISCVVector::BI__builtin_rvv_vaaddu_vx_tama:
   case RISCVVector::BI__builtin_rvv_vaadd_vv_tama:
@@ -4719,6 +4739,16 @@
   case RISCVVector::BI__builtin_rvv_vasubu_vx_tama:
   case RISCVVector::BI__builtin_rvv_vasub_vv_tama:
   case RISCVVector::BI__builtin_rvv_vasub_vx_tama:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tama:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tama:
+  case RISCVVector::BI__builtin_rvv_vssra_vv_tama:
+  case RISCVVector::BI__builtin_rvv_vssra_vx_tama:
+  case RISCVVector::BI__builtin_rvv_vssrl_vv_tama:
+  case RISCVVector::BI__builtin_rvv_vssrl_vx_tama:
+  case RISCVVector::BI__builtin_rvv_vnclip_wv_tama:
+  case RISCVVector::BI__builtin_rvv_vnclip_wx_tama:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tama:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tama:
     return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
   case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
   case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
@@ -4744,6 +4774,36 @@
   case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
   case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
   case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+  case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+  case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+  case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
     return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
   case RISCV::BI__builtin_riscv_ntl_load:
   case RISCV::BI__builtin_riscv_ntl_store:
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c
@@ -9,600 +9,600 @@
 // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8
 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf8(op1, shift, vl);
+  return __riscv_vnclip_wv_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
 }

 // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8
 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf8(op1, shift, vl);
+  return __riscv_vnclip_wx_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
 }

 // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4
 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf4(op1, shift, vl);
+  return __riscv_vnclip_wv_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
 }

 // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4
 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4(op1, shift, vl); + return __riscv_vnclip_wx_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2(op1, shift, vl); + return __riscv_vnclip_wv_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2(op1, shift, vl); + return __riscv_vnclip_wx_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1(op1, shift, vl); + return __riscv_vnclip_wv_i8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1(op1, shift, vl); + return __riscv_vnclip_wx_i8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], 
[[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2(op1, shift, vl); + return __riscv_vnclip_wv_i8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2(op1, shift, vl); + return __riscv_vnclip_wx_i8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4(op1, shift, vl); + return __riscv_vnclip_wv_i8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4(op1, shift, vl); + return __riscv_vnclip_wx_i8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4(op1, shift, vl); + return __riscv_vnclip_wv_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4(op1, shift, vl); + return __riscv_vnclip_wx_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2(op1, shift, vl); + return __riscv_vnclip_wv_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2(op1, shift, vl); + return __riscv_vnclip_wx_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1(op1, shift, vl); + return __riscv_vnclip_wv_i16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1(op1, shift, vl); + return __riscv_vnclip_wx_i16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2(op1, 
shift, vl); + return __riscv_vnclip_wv_i16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2(op1, shift, vl); + return __riscv_vnclip_wx_i16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4(op1, shift, vl); + return __riscv_vnclip_wv_i16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4(op1, shift, vl); + return __riscv_vnclip_wx_i16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2(op1, shift, vl); + return __riscv_vnclip_wv_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2(op1, shift, vl); + return __riscv_vnclip_wx_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vnclip_wv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1(op1, shift, vl); + return __riscv_vnclip_wv_i32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1(op1, shift, vl); + return __riscv_vnclip_wx_i32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2(op1, shift, vl); + return __riscv_vnclip_wv_i32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2(op1, shift, vl); + return __riscv_vnclip_wx_i32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4(op1, shift, vl); + return __riscv_vnclip_wv_i32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4(op1, shift, vl); + return __riscv_vnclip_wx_i32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_m(mask, op1, shift, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], 
[[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t 
op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( 
poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_m(mask, op1, 
shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c @@ -9,600 +9,600 @@ // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8(op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8(op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4(op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4(op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2(op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2(op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1(op1, shift, vl); + return __riscv_vnclipu_wv_u8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1(op1, shift, vl); + return __riscv_vnclipu_wx_u8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2(op1, shift, vl); + return __riscv_vnclipu_wv_u8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2(op1, shift, vl); + return __riscv_vnclipu_wx_u8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4(op1, shift, vl); + return __riscv_vnclipu_wv_u8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4(op1, shift, vl); + return __riscv_vnclipu_wx_u8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4(op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4(op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2(op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2(op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1(op1, shift, vl); + return __riscv_vnclipu_wv_u16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1(op1, shift, vl); + return __riscv_vnclipu_wx_u16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2(op1, shift, vl); + return __riscv_vnclipu_wv_u16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2(op1, shift, vl); + return __riscv_vnclipu_wx_u16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4(op1, shift, vl); + return __riscv_vnclipu_wv_u16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, 
size_t vl) { - return __riscv_vnclipu_wx_u16m4(op1, shift, vl); + return __riscv_vnclipu_wx_u16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2(op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2(op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1(op1, shift, vl); + return __riscv_vnclipu_wv_u32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1(op1, shift, vl); + return __riscv_vnclipu_wx_u32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2(op1, shift, vl); + return __riscv_vnclipu_wv_u32m2(op1, shift, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2(op1, shift, vl); + return __riscv_vnclipu_wx_u32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4(op1, shift, vl); + return __riscv_vnclipu_wv_u32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4(op1, shift, vl); + return __riscv_vnclipu_wx_u32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8_m(mask, op1, 
shift, vl); + return __riscv_vnclipu_wx_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vnclipu_wx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vnclipu_wv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, 
[[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8(op1, op2, vl); + return __riscv_vsmul_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8(op1, op2, vl); + return __riscv_vsmul_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4(op1, op2, vl); + return __riscv_vsmul_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vsmul_vx_i8mf4(op1, op2, vl); + return __riscv_vsmul_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2(op1, op2, vl); + return __riscv_vsmul_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2(op1, op2, vl); + return __riscv_vsmul_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1(op1, op2, vl); + return __riscv_vsmul_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1(op1, op2, vl); + return __riscv_vsmul_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2(op1, op2, vl); + return __riscv_vsmul_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2(op1, op2, vl); + return __riscv_vsmul_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4(op1, op2, vl); + return __riscv_vsmul_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4(op1, op2, vl); + return __riscv_vsmul_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8(op1, op2, vl); + return __riscv_vsmul_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8(op1, op2, vl); + return __riscv_vsmul_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4(op1, op2, vl); + 
return __riscv_vsmul_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4(op1, op2, vl); + return __riscv_vsmul_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2(op1, op2, vl); + return __riscv_vsmul_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2(op1, op2, vl); + return __riscv_vsmul_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1(op1, op2, vl); + return __riscv_vsmul_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1(op1, op2, vl); + return __riscv_vsmul_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2(op1, op2, vl); + return __riscv_vsmul_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2(op1, op2, vl); + return __riscv_vsmul_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4(op1, op2, vl); + return __riscv_vsmul_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4(op1, op2, vl); + return __riscv_vsmul_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8(op1, op2, vl); + return __riscv_vsmul_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, 
int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8(op1, op2, vl); + return __riscv_vsmul_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2(op1, op2, vl); + return __riscv_vsmul_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2(op1, op2, vl); + return __riscv_vsmul_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1(op1, op2, vl); + return __riscv_vsmul_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1(op1, op2, vl); + return __riscv_vsmul_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2(op1, op2, vl); + return __riscv_vsmul_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2(op1, op2, vl); + return __riscv_vsmul_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4(op1, op2, vl); + return __riscv_vsmul_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4(op1, op2, vl); + return __riscv_vsmul_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8(op1, op2, vl); + return __riscv_vsmul_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8(op1, op2, vl); + return __riscv_vsmul_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1(op1, op2, vl); + return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1(op1, op2, vl); + return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2(op1, op2, vl); + return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2(op1, op2, vl); + return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4(op1, op2, vl); + return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4(op1, op2, vl); + return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8(op1, op2, vl); + return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8(op1, op2, vl); + return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vsmul_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - 
return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8(op1, shift, vl); + return __riscv_vssra_vv_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8(op1, shift, vl); + return __riscv_vssra_vx_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4(op1, shift, vl); + return __riscv_vssra_vv_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4(op1, shift, vl); + return __riscv_vssra_vx_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2(op1, shift, vl); + return __riscv_vssra_vv_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2(op1, shift, vl); + return __riscv_vssra_vx_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1(op1, shift, vl); + return __riscv_vssra_vv_i8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( poison, 
[[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1(op1, shift, vl); + return __riscv_vssra_vx_i8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2(op1, shift, vl); + return __riscv_vssra_vv_i8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2(op1, shift, vl); + return __riscv_vssra_vx_i8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4(op1, shift, vl); + return __riscv_vssra_vv_i8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4(op1, shift, vl); + return __riscv_vssra_vx_i8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8(op1, shift, vl); + return __riscv_vssra_vv_i8m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssra_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8(op1, shift, vl); + return __riscv_vssra_vx_i8m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4(op1, shift, vl); + return __riscv_vssra_vv_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4(op1, shift, vl); + return __riscv_vssra_vx_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2(op1, shift, vl); + return __riscv_vssra_vv_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2(op1, shift, vl); + return __riscv_vssra_vx_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1(op1, shift, vl); + return __riscv_vssra_vv_i16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1(op1, shift, vl); + return __riscv_vssra_vx_i16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2(op1, shift, vl); + return __riscv_vssra_vv_i16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2(op1, shift, vl); + return __riscv_vssra_vx_i16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4(op1, shift, vl); + return __riscv_vssra_vv_i16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_vx_i16m4(op1, shift, vl); + return __riscv_vssra_vx_i16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8(op1, shift, vl); + return __riscv_vssra_vv_i16m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8(op1, shift, vl); + return __riscv_vssra_vx_i16m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2(op1, shift, vl); + return __riscv_vssra_vv_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2(op1, shift, vl); + return __riscv_vssra_vx_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1(op1, shift, vl); + return __riscv_vssra_vv_i32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1(op1, shift, vl); + return __riscv_vssra_vx_i32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2(op1, shift, vl); + return __riscv_vssra_vv_i32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2(op1, shift, vl); + return __riscv_vssra_vx_i32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4(op1, shift, vl); + return __riscv_vssra_vv_i32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4(op1, shift, vl); + return __riscv_vssra_vx_i32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8(op1, shift, vl); + return __riscv_vssra_vv_i32m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8(op1, shift, vl); + return __riscv_vssra_vx_i32m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1(op1, shift, vl); + return __riscv_vssra_vv_i64m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1(op1, shift, vl); + return __riscv_vssra_vx_i64m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2(op1, shift, vl); + return __riscv_vssra_vv_i64m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2(op1, shift, vl); + return __riscv_vssra_vx_i64m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssra_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4(op1, shift, vl); + return __riscv_vssra_vv_i64m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4(op1, shift, vl); + return __riscv_vssra_vx_i64m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8(op1, shift, vl); + return __riscv_vssra_vv_i64m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8(op1, shift, vl); + return __riscv_vssra_vx_i64m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t 
shift, size_t vl) { - return __riscv_vssra_vv_i8m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m8_m(mask, 
op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_vx_i64m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8(op1, shift, vl); + return __riscv_vssrl_vv_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8(op1, shift, vl); + return __riscv_vssrl_vx_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4(op1, shift, vl); + return __riscv_vssrl_vv_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4(op1, shift, vl); + return __riscv_vssrl_vx_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 
0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2(op1, shift, vl); + return __riscv_vssrl_vv_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2(op1, shift, vl); + return __riscv_vssrl_vx_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1(op1, shift, vl); + return __riscv_vssrl_vv_u8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1(op1, shift, vl); + return __riscv_vssrl_vx_u8m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2(op1, shift, vl); + return __riscv_vssrl_vv_u8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2(op1, shift, vl); + return __riscv_vssrl_vx_u8m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4(op1, shift, vl); + return __riscv_vssrl_vv_u8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4(op1, shift, vl); + return __riscv_vssrl_vx_u8m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8(op1, shift, vl); + return __riscv_vssrl_vv_u8m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8(op1, shift, vl); + return __riscv_vssrl_vx_u8m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4(op1, shift, vl); + return __riscv_vssrl_vv_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4(op1, shift, vl); + return __riscv_vssrl_vx_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2(op1, shift, vl); + return __riscv_vssrl_vv_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2(op1, shift, vl); + return __riscv_vssrl_vx_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1(op1, shift, vl); + return __riscv_vssrl_vv_u16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1(op1, shift, vl); + return __riscv_vssrl_vx_u16m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return 
__riscv_vssrl_vv_u16m2(op1, shift, vl); + return __riscv_vssrl_vv_u16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2(op1, shift, vl); + return __riscv_vssrl_vx_u16m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4(op1, shift, vl); + return __riscv_vssrl_vv_u16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4(op1, shift, vl); + return __riscv_vssrl_vx_u16m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8(op1, shift, vl); + return __riscv_vssrl_vv_u16m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8(op1, shift, vl); + return __riscv_vssrl_vx_u16m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2(op1, shift, vl); + return __riscv_vssrl_vv_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2(op1, shift, vl); + return __riscv_vssrl_vx_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1(op1, shift, vl); + return __riscv_vssrl_vv_u32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1(op1, shift, vl); + return __riscv_vssrl_vx_u32m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2(op1, shift, vl); + return __riscv_vssrl_vv_u32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( 
poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2(op1, shift, vl); + return __riscv_vssrl_vx_u32m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4(op1, shift, vl); + return __riscv_vssrl_vv_u32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4(op1, shift, vl); + return __riscv_vssrl_vx_u32m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8(op1, shift, vl); + return __riscv_vssrl_vv_u32m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8(op1, shift, vl); + return __riscv_vssrl_vx_u32m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1(op1, shift, vl); + return __riscv_vssrl_vv_u64m1(op1, shift, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1(op1, shift, vl); + return __riscv_vssrl_vx_u64m1(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2(op1, shift, vl); + return __riscv_vssrl_vv_u64m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2(op1, shift, vl); + return __riscv_vssrl_vx_u64m2(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4(op1, shift, vl); + return __riscv_vssrl_vv_u64m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4(op1, shift, vl); + return __riscv_vssrl_vx_u64m4(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8(op1, shift, vl); + return __riscv_vssrl_vv_u64m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8(op1, shift, vl); + return __riscv_vssrl_vx_u64m8(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssrl_vv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, 
size_t vl) { - return __riscv_vssrl_vv_u8m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_m(mask, op1, shift, vl); + return 
__riscv_vssrl_vv_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 
0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vssrl_vx_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c @@ -9,600 +9,600 @@ // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4 // CHECK-RV64-SAME: ( 
[[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) 
{ - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, vl); + return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return 
__riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( 
poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(mask, op1, shift, vl); + return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) 
{
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c
@@ -9,600 +9,600 @@
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8
// CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8
// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]],
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { - return 
__riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 
noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(op1, shift, vl); + return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, 
vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], 
i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu(mask, op1, shift, vl); + return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c
@@ -9,880 +9,880 @@
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8
// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( 
poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return 
__riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return 
__riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul(mask, 
op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c
@@ -9,880 +9,880 @@
// CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8
// CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8
// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssra_vx_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - 
return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( 
poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, vl); + return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], 
i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, 
__RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, vl); + return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c @@ -9,880 +9,880 @@ // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t 
op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return 
__riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return 
__riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( 
poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return 
__riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, vl); + return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vx_u8m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return 
__riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, 
vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, [[OP1]], i64 [[SHIFT]], 
[[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, vl); + return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c @@ -9,1200 +9,1200 @@ // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_tu // CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, 
size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], 
[[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], 
i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], 
[[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t 
mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, vl); + 
return __riscv_vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], 
i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, 
size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return 
__riscv_vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_mu(mask, maskedoff, op1, 
shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], 
i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c
@@ -9,1200 +9,1200 @@
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vnclipu_wv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t 
maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return 
__riscv_vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_tum(mask, maskedoff, 
op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vnclipu_wx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], 
[[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], 
i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t 
mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) 
{ - return __riscv_vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_tumu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], 
i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], 
[[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c
@@ -9,1760 +9,1760 @@
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]],
[[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t 
op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 
0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( 
[[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return 
__riscv_vsmul_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return 
__riscv_vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], 
[[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t 
vl) { - return __riscv_vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t 
maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vsmul_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, 
size_t vl) { - return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsmul_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 
[[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return 
__riscv_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c
@@ -9,1760 +9,1760 @@
// CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t
op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_tu // CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_vx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], 
[[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssra_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return 
__riscv_vssra_vv_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], 
i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); 
+ return __riscv_vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, 
vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_tum(mask, maskedoff, op1, 
shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
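// The pattern across these regenerated checks is uniform: every masked
// __riscv_vssra_* intrinsic here now takes an explicit vxrm rounding-mode
// argument between the shift amount and vl, and the corresponding IR call
// gains one extra i64 immediate ahead of vl (0 when __RISCV_VXRM_RNU is
// passed), with the tail/mask policy operand staying last. A minimal usage
// sketch, assuming only the signature these tests exercise and that
// <riscv_vector.h> is in scope; the wrapper name below is hypothetical and
// not part of the test file.
static inline vint8m1_t shift_round_nearest_up(vbool8_t mask,
                                               vint8m1_t maskedoff,
                                               vint8m1_t op1, vuint8m1_t shift,
                                               size_t vl) {
  // The rounding mode must be a constant in [0, 3] (Sema enforces the range);
  // __RISCV_VXRM_RNU is the value that lowers to the `i64 0` operand seen in
  // the checks above.
  return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift,
                                    __RISCV_VXRM_RNU, vl);
}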
vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_tumu(mask, 
maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 
0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssra_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, 
vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 
1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_mu(mask, maskedoff, 
op1, shift, vl); + return __riscv_vssra_vx_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], 
[[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl);
+  return __riscv_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c
@@ -9,1760 +9,1760 @@
// CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf8_tu(maskedoff, op1, shift, vl);
+  return __riscv_vssrl_vv_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf8_tu(maskedoff, op1, shift, vl);
+  return __riscv_vssrl_vx_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf4_tu(maskedoff, op1, shift, vl);
+  return __riscv_vssrl_vv_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], 
[[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_tu(maskedoff, op1, shift, vl); + return 
__riscv_vssrl_vx_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t 
vl) { - return __riscv_vssrl_vv_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( 
[[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t 
vl) { - return __riscv_vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_tumu(mask, maskedoff, op1, 
shift, vl); + return __riscv_vssrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t 
test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, 
shift, vl); + return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t 
maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t 
vl) { - return __riscv_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vv_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( 
[[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t 
shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vx_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c @@ -9,1200 +9,1200 @@ // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, 
op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + 
return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 
2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); 
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( 
[[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, 
vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], 
[[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, 
op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclip_wx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c @@ -9,1200 +9,1200 @@ // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_tu // CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - 
return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return 
__riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return 
__riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vnclipu_wx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( 
[[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
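// NOTE: illustrative sketch, not part of the autogenerated FileCheck tests in this diff.
// After this change, every policy variant of the fixed-point intrinsics takes an explicit
// vxrm (rounding mode) argument immediately before vl, as the updated calls above show.
// Assuming the __RISCV_VXRM_* enum from <riscv_vector.h> (already included by these test
// files), where __RISCV_VXRM_RNU is 0 -- matching the "i64 0" operand in the CHECK lines --
// and the remaining values fall in the 0..3 range enforced in SemaChecking.cpp, a caller
// that wants a different rounding mode only swaps the constant:
vuint8m1_t clip_round_to_odd(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1,
                             size_t shift, size_t vl) {
  // Same tail-undisturbed/mask-undisturbed overload exercised by the tests above;
  // only the rounding-mode constant differs (__RISCV_VXRM_ROD, assumed name for
  // round-to-odd, instead of __RISCV_VXRM_RNU).
  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_ROD, vl);
}
//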
CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, 
shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, 
shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vnclipu_wx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c @@ -9,1760 +9,1760 @@ // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, 
op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, 
op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, 
op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + 
return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) 
{ - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t 
maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( 
[[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vsmul_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, 
size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vsmul_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, 
vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return 
__riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } 
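The hunks above and below follow one mechanical pattern: every masked `vsmul` test gains an explicit fixed-point rounding-mode argument before `vl`, and the expected IR gains the matching `i64 0` (RNU) operand ahead of the vector length. As a minimal usage sketch of the updated overloaded intrinsic, assuming a toolchain built with this patch and an RV64 target with the V extension (the function name `scale_mul_rnu` is illustrative and not part of the patch):

#include <stddef.h>
#include <riscv_vector.h>

// Masked, mask-undisturbed (_mu) form after this change:
// (mask, maskedoff, op1, op2, vxrm, vl).
vint8m1_t scale_mul_rnu(vbool8_t mask, vint8m1_t maskedoff,
                        vint8m1_t a, vint8m1_t b, size_t vl) {
  // __RISCV_VXRM_RNU (0) selects round-to-nearest-up; SemaChecking now
  // rejects any rounding-mode constant outside the 0..3 range.
  return __riscv_vsmul_mu(mask, maskedoff, a, b, __RISCV_VXRM_RNU, vl);
}
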
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, 
vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c
@@ -9,1760 +9,1760 @@
// CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+
return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) 
{ - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], 
[[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vssra_vv_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return 
__riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], 
[[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, 
op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } 
// CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vssra_vv_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, 
maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + 
return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 
[[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, 
vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m4_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t 
shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vv_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssra_vx_i64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, 
maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c
@@ -9,1760 +9,1760 @@
// CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_tu
// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_tu // CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return 
__riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vx_u64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return 
__riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, 
size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], 
[[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vssrl_vx_u8m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], 
[[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return 
__riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t 
op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( 
[[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u8m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 
[[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 
[[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, 
__RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vv_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssrl_vx_u64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[SHIFT]], [[MASK]], i64 0, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vnclip-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vnclip-out-of-range.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vnclip-out-of-range.c @@ -0,0 +1,66 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -fsyntax-only -verify %s + +#include + +vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wv_i32m1(op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wx_i32m1(op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + // 
expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, 5, vl); +} + +vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, 5, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vnclipu-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vnclipu-out-of-range.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vnclipu-out-of-range.c @@ -0,0 +1,66 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -fsyntax-only -verify %s + +#include + +vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wv_u32m1(op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wx_u32m1(op1, shift, 5, vl); +} + +vuint32m1_t 
test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, 5, vl); +} + +vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, 5, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c @@ -8,144 +8,144 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return 
__riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, vl); + return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, vl); + return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c @@ -8,144 +8,144 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1(op1, op2, vl); + return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1(op1, op2, vl); + return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2(op1, op2, vl); + return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2(op1, op2, vl); + return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4(op1, op2, vl); + return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4(op1, op2, vl); + return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8(op1, op2, vl); + return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8(op1, op2, vl); + return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - 
return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-out-of-range.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-out-of-range.c @@ -0,0 +1,67 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -fsyntax-only -verify %s + +#include + +vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vv_i32m1(op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vx_i32m1(op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vv_i32m1_tum( + vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vx_i32m1_tum(mask, 
maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, 5, vl); +} + +vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, 5, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vssra-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vssra-out-of-range.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vssra-out-of-range.c @@ -0,0 +1,67 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -fsyntax-only -verify %s + +#include + +vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1(op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1(op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_m(mask, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_m(mask, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_tum( + vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return 
__riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, op2, 5, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vssrl-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vssrl-out-of-range.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vssrl-out-of-range.c @@ -0,0 +1,67 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -fsyntax-only -verify %s + +#include + +vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1(op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1(op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_m(mask, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_m(mask, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_tum( + vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is 
outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, op2, 5, vl); +} + +vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}} + return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, op2, 5, vl); +} diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -629,6 +629,31 @@ let VLOperand = 4; } // For Saturating binary operations. + // The destination vector type is the same as first source vector. + // The second source operand matches the destination type or is an XLen scalar. + // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl) + class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode + : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty, LLVMMatchType<2>], + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, + RISCVVIntrinsic { + let VLOperand = 4; + } + // For Saturating binary operations with mask. + // The destination vector type is the same as first source vector. + // The second source operand matches the destination type or is an XLen scalar. + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) + class RISCVSaturatingBinaryAAShiftMaskedRoundingMode + : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>, LLVMMatchType<2>], + [ImmArg>,ImmArg>, IntrNoMem, IntrHasSideEffects]>, + RISCVVIntrinsic { + let VLOperand = 6; + } + // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. // Input: (passthru, vector_in, vector_in/scalar_in, vl) @@ -651,6 +676,31 @@ [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { let VLOperand = 4; } + // For Saturating binary operations. + // The destination vector type is NOT the same as first source vector. + // The second source operand matches the destination type or is an XLen scalar. + // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl) + class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode + : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty, LLVMMatchType<3>], + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, + RISCVVIntrinsic { + let VLOperand = 4; + } + // For Saturating binary operations with mask. 
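As a rough illustration of the unmasked operand layout these new classes describe — (passthru, vector_in, vector_in/scalar_in, vxrm, vl) — the IR below sketches one call. It assumes the intrinsic signatures introduced by this patch, uses nxv1i8 / i64 purely as an example type combination, and follows the type-mangling pattern visible in the updated tests (result type, shift type, XLEN type); the vxrm immediate 0 selects round-to-nearest-up (RNU).

  ; Unmasked vssra: (passthru, vector_in, shift, vxrm, vl); vxrm must be an immediate in [0, 3].
  declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(
      <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64)

  define <vscale x 1 x i8> @ssra_rnu(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
    ; passthru is poison for an unmasked, tail-agnostic operation; vxrm = 0 (RNU)
    %r = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(
            <vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift,
            i64 0, i64 %vl)
    ret <vscale x 1 x i8> %r
  }
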
+ // The destination vector type is NOT the same as first source vector (with mask). + // The second source operand matches the destination type or is an XLen scalar. + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) + class RISCVSaturatingBinaryABShiftMaskedRoundingMode + : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<3>, LLVMMatchType<3>], + [ImmArg>, ImmArg>, IntrNoMem, + IntrHasSideEffects]>, RISCVVIntrinsic { + let VLOperand = 5; + } // Input: (vector_in, vector_in, scalar_in, vl, policy) class RVVSlideUnMasked : DefaultAttrsIntrinsic<[llvm_anyvector_ty], @@ -1085,13 +1135,13 @@ def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode; def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode; } - multiclass RISCVSaturatingBinaryAAShift { - def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked; - def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked; + multiclass RISCVSaturatingBinaryAAShiftRoundingMode { + def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode; + def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode; } - multiclass RISCVSaturatingBinaryABShift { - def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked; - def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked; + multiclass RISCVSaturatingBinaryABShiftRoundingMode { + def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode; + def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode; } multiclass RVVSlide { def "int_riscv_" # NAME : RVVSlideUnMasked; @@ -1366,13 +1416,13 @@ defm vasubu : RISCVSaturatingBinaryAAXRoundingMode; defm vasub : RISCVSaturatingBinaryAAXRoundingMode; - defm vsmul : RISCVSaturatingBinaryAAX; + defm vsmul : RISCVSaturatingBinaryAAXRoundingMode; - defm vssrl : RISCVSaturatingBinaryAAShift; - defm vssra : RISCVSaturatingBinaryAAShift; + defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode; + defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode; - defm vnclipu : RISCVSaturatingBinaryABShift; - defm vnclip : RISCVSaturatingBinaryABShift; + defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode; + defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode; defm vmfeq : RISCVCompare; defm vmfne : RISCVCompare; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -2090,6 +2090,10 @@ defm _VI : VPseudoBinary; } +multiclass VPseudoBinaryV_VI_RM { + defm _VI : VPseudoBinaryRoundingMode; +} + multiclass VPseudoVALU_MM { foreach m = MxList in { defvar mx = m.MX; @@ -2152,16 +2156,34 @@ !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>; } +multiclass VPseudoBinaryV_WV_RM { + defm _WV : VPseudoBinaryRoundingMode; +} + multiclass VPseudoBinaryV_WX { defm _WX : VPseudoBinary; } +multiclass VPseudoBinaryV_WX_RM { + defm _WX : VPseudoBinaryRoundingMode; +} + multiclass VPseudoBinaryV_WI { defm _WI : VPseudoBinary; } +multiclass VPseudoBinaryV_WI_RM { + defm _WI : VPseudoBinaryRoundingMode; +} + // For vadc and vsbc, the instruction encoding is reserved if the destination // vector register is v0. 
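For the masked form, the operand order is (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy), and selection through the new *_RM pseudos is what lets the backend materialise the rounding mode with a csrwi to vxrm, as the updated CHECK lines further down show. A minimal sketch, again assuming the patched intrinsic signatures and illustrative nxv1i8 types:

  ; Masked vssra: (maskedoff, vector_in, shift, mask, vxrm, vl, policy)
  declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(
      <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>,
      <vscale x 1 x i1>, i64, i64, i64)

  define <vscale x 1 x i8> @ssra_masked_rnu(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift,
                                            <vscale x 1 x i1> %mask, i64 %vl) {
    ; vxrm = 0 (RNU); policy = 3 (tail agnostic | mask agnostic), so maskedoff may be poison.
    ; Expected lowering, by analogy with the updated tests: csrwi vxrm, 0 then vssra.vv ..., v0.t
    %r = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(
            <vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift,
            <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
    ret <vscale x 1 x i8> %r
  }
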
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0 @@ -2508,7 +2530,7 @@ } } -multiclass VPseudoVSSHT_VV_VX_VI { +multiclass VPseudoVSSHT_VV_VX_VI_RM { foreach m = MxList in { defvar mx = m.MX; defvar WriteVSShiftV_MX = !cast("WriteVSShiftV_" # mx); @@ -2517,11 +2539,11 @@ defvar ReadVSShiftV_MX = !cast("ReadVSShiftV_" # mx); defvar ReadVSShiftX_MX = !cast("ReadVSShiftX_" # mx); - defm "" : VPseudoBinaryV_VV, + defm "" : VPseudoBinaryV_VV_RM, Sched<[WriteVSShiftV_MX, ReadVSShiftV_MX, ReadVSShiftV_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VX, + defm "" : VPseudoBinaryV_VX_RM, Sched<[WriteVSShiftX_MX, ReadVSShiftV_MX, ReadVSShiftX_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VI, + defm "" : VPseudoBinaryV_VI_RM, Sched<[WriteVSShiftI_MX, ReadVSShiftV_MX, ReadVMask]>; } } @@ -2559,7 +2581,7 @@ } } -multiclass VPseudoVSMUL_VV_VX { +multiclass VPseudoVSMUL_VV_VX_RM { foreach m = MxList in { defvar mx = m.MX; defvar WriteVSMulV_MX = !cast("WriteVSMulV_" # mx); @@ -2567,9 +2589,9 @@ defvar ReadVSMulV_MX = !cast("ReadVSMulV_" # mx); defvar ReadVSMulX_MX = !cast("ReadVSMulX_" # mx); - defm "" : VPseudoBinaryV_VV, + defm "" : VPseudoBinaryV_VV_RM, Sched<[WriteVSMulV_MX, ReadVSMulV_MX, ReadVSMulV_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VX, + defm "" : VPseudoBinaryV_VX_RM, Sched<[WriteVSMulX_MX, ReadVSMulV_MX, ReadVSMulX_MX, ReadVMask]>; } } @@ -3061,7 +3083,7 @@ } } -multiclass VPseudoVNCLP_WV_WX_WI { +multiclass VPseudoVNCLP_WV_WX_WI_RM { foreach m = MxListW in { defvar mx = m.MX; defvar WriteVNClipV_MX = !cast("WriteVNClipV_" # mx); @@ -3070,11 +3092,11 @@ defvar ReadVNClipV_MX = !cast("ReadVNClipV_" # mx); defvar ReadVNClipX_MX = !cast("ReadVNClipX_" # mx); - defm "" : VPseudoBinaryV_WV, + defm "" : VPseudoBinaryV_WV_RM, Sched<[WriteVNClipV_MX, ReadVNClipV_MX, ReadVNClipV_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_WX, + defm "" : VPseudoBinaryV_WX_RM, Sched<[WriteVNClipX_MX, ReadVNClipV_MX, ReadVNClipX_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_WI, + defm "" : VPseudoBinaryV_WI_RM, Sched<[WriteVNClipI_MX, ReadVNClipV_MX, ReadVMask]>; } } @@ -4772,6 +4794,18 @@ vti.RegClass, imm_type>; } +multiclass VPatBinaryV_VI_RM vtilist, + Operand imm_type> { + foreach vti = vtilist in + let Predicates = GetVTypePredicates.Predicates in + defm : VPatBinaryTARoundingMode; +} + multiclass VPatBinaryM_MM { foreach mti = AllMasks in let Predicates = [HasVInstructions] in @@ -4867,6 +4901,21 @@ } } +multiclass VPatBinaryV_WV_RM vtilist> { + foreach VtiToWti = vtilist in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatBinaryTARoundingMode; + } +} + multiclass VPatBinaryV_WX vtilist> { foreach VtiToWti = vtilist in { @@ -4882,6 +4931,23 @@ } } +multiclass VPatBinaryV_WX_RM vtilist> { + foreach VtiToWti = vtilist in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + defvar kind = "W"#Vti.ScalarSuffix; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatBinaryTARoundingMode; + } +} + + multiclass VPatBinaryV_WI vtilist> { foreach VtiToWti = vtilist in { @@ -4896,6 +4962,21 @@ } } +multiclass VPatBinaryV_WI_RM vtilist> { + foreach VtiToWti = vtilist in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatBinaryTARoundingMode; + } +} + multiclass VPatBinaryV_VM vtilist = AllIntegerVectors> { @@ -5043,6 
+5124,12 @@ VPatBinaryV_VX, VPatBinaryV_VI; +multiclass VPatBinaryV_VV_VX_VI_RM vtilist, Operand ImmType = simm5> + : VPatBinaryV_VV_RM, + VPatBinaryV_VX_RM, + VPatBinaryV_VI_RM; + multiclass VPatBinaryV_VV_VX vtilist, bit isSEWAware = 0> : VPatBinaryV_VV, @@ -5074,6 +5161,12 @@ VPatBinaryV_WX, VPatBinaryV_WI; +multiclass VPatBinaryV_WV_WX_WI_RM vtilist> + : VPatBinaryV_WV_RM, + VPatBinaryV_WX_RM, + VPatBinaryV_WI_RM; + multiclass VPatBinaryV_VM_XM_IM : VPatBinaryV_VM_TAIL, VPatBinaryV_XM_TAIL, @@ -5804,24 +5897,22 @@ //===----------------------------------------------------------------------===// // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation //===----------------------------------------------------------------------===// -let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in { - defm PseudoVSMUL : VPseudoVSMUL_VV_VX; +let Defs = [VXSAT], hasSideEffects = 1 in { + defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM; } //===----------------------------------------------------------------------===// // 12.4. Vector Single-Width Scaling Shift Instructions //===----------------------------------------------------------------------===// -let Uses = [VXRM], hasSideEffects = 1 in { - defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI; - defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI; -} +defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM; +defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM; //===----------------------------------------------------------------------===// // 12.5. Vector Narrowing Fixed-Point Clip Instructions //===----------------------------------------------------------------------===// -let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in { - defm PseudoVNCLIP : VPseudoVNCLP_WV_WX_WI; - defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI; +let Defs = [VXSAT], hasSideEffects = 1 in { + defm PseudoVNCLIP : VPseudoVNCLP_WV_WX_WI_RM; + defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM; } } // Predicates = [HasVInstructions] @@ -6474,24 +6565,28 @@ //===----------------------------------------------------------------------===// // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation //===----------------------------------------------------------------------===// -defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", IntegerVectorsExceptI64>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL", + IntegerVectorsExceptI64>; // vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*. let Predicates = [HasVInstructionsFullMultiply] in -defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", I64IntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL", + I64IntegerVectors>; //===----------------------------------------------------------------------===// // 12.4. Vector Single-Width Scaling Shift Instructions //===----------------------------------------------------------------------===// -defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors, - uimm5>; -defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors, - uimm5>; +defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssrl", "PseudoVSSRL", + AllIntegerVectors, uimm5>; +defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssra", "PseudoVSSRA", + AllIntegerVectors, uimm5>; //===----------------------------------------------------------------------===// // 12.5. 
Vector Narrowing Fixed-Point Clip Instructions //===----------------------------------------------------------------------===// -defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>; -defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>; +defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclipu", "PseudoVNCLIPU", + AllWidenableIntVectors>; +defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP", + AllWidenableIntVectors>; //===----------------------------------------------------------------------===// // 13. Vector Floating-Point Instructions diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -537,12 +537,14 @@ , , iXLen, + iXLen, iXLen); define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -551,7 +553,7 @@ %0, %1, %2, - iXLen %3, iXLen 3) + iXLen 0, iXLen %3, iXLen 3) ret %a } @@ -562,12 +564,14 @@ , , iXLen, + iXLen, iXLen); define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -576,7 +580,7 @@ %0, %1, %2, - iXLen %3, iXLen 3) + iXLen 0, iXLen %3, iXLen 3) ret %a } @@ -587,12 +591,14 @@ , , iXLen, + iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -601,7 +607,7 @@ %0, %1, %2, - iXLen %3, iXLen 3) + iXLen 0, iXLen %3, iXLen 3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -510,11 +510,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -523,7 +525,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -534,11 +536,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -547,7 +551,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -558,11 +562,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -571,7 +577,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -510,11 +510,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -523,7 +525,7 @@ %1, %2, %3, - iXLen %4, iXLen 2) + iXLen 0, iXLen %4, iXLen 2) ret %a } @@ -534,11 +536,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -547,7 +551,7 @@ %1, %2, %3, - iXLen %4, iXLen 2) + iXLen 0, iXLen %4, iXLen 2) ret %a } @@ -558,11 +562,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -571,7 +577,7 @@ %1, %2, %3, - iXLen %4, iXLen 2) + iXLen 0, iXLen %4, iXLen 2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -510,11 +510,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -523,7 +525,7 @@ %1, %2, %3, - iXLen %4, iXLen 0) + iXLen 0, iXLen %4, iXLen 0) ret %a } @@ -534,11 +536,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -547,7 +551,7 @@ %1, %2, %3, - iXLen %4, iXLen 0) + iXLen 0, iXLen %4, iXLen 0) ret %a } @@ -558,11 +562,13 @@ , , iXLen, + iXLen, iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -571,7 +577,7 @@ %1, %2, %3, - iXLen %4, iXLen 0) + iXLen 0, iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll 
b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll --- a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll @@ -12,14 +12,15 @@ ; CHECK-NEXT: addi a1, a1, %lo(.L__const.test.var_45) ; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vmul.vx v12, v8, a1 ; CHECK-NEXT: lui a1, %hi(.L__const.test.var_101) ; CHECK-NEXT: addi a1, a1, %lo(.L__const.test.var_101) -; CHECK-NEXT: vle8.v v12, (a1) -; CHECK-NEXT: li a1, 1 -; CHECK-NEXT: vmul.vx v16, v8, a1 -; CHECK-NEXT: vmv.x.s a1, v16 +; CHECK-NEXT: vle8.v v16, (a1) +; CHECK-NEXT: vmv.x.s a1, v12 +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vmsleu.vx v0, v8, a1 -; CHECK-NEXT: vssra.vv v8, v12, v8 +; CHECK-NEXT: vssra.vv v8, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v8, v0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -29,7 +30,7 @@ %2 = tail call @llvm.riscv.vle.nxv32i8.i64( undef, ptr nonnull @__const.test.var_101, i64 2) %3 = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2) %4 = tail call i8 @llvm.riscv.vmv.x.s.nxv32i8( %1) - %5 = tail call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( undef, %2, %0, i64 2) + %5 = tail call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( undef, %2, %0, i64 0, i64 2) %6 = tail call @llvm.riscv.vmsleu.nxv32i8.i8.i64( %0, i8 %4, i64 2) %7 = tail call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, %5, %5, %6, i64 2) tail call void @llvm.riscv.vse.nxv32i8.i64( %7, ptr %var_99, i64 2) @@ -40,7 +41,7 @@ declare @llvm.riscv.vmul.nxv32i8.i8.i64(, , i8, i64) #2 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3 declare i8 @llvm.riscv.vmv.x.s.nxv32i8() #2 -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64) #3 +declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64, i64) #3 declare @llvm.riscv.vmsleu.nxv32i8.i8.i64(, i8, i64) #2 declare @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(, , , , i64) #2 declare void @llvm.riscv.vse.nxv32i8.i64(, ptr nocapture, i64) #4 diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -944,12 +944,14 @@ , , , + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -957,7 +959,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -966,12 +968,14 @@ , , , + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -979,7 +983,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1261,12 +1265,14 @@ , , , + iXLen, iXLen); define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1274,7 +1280,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1283,6 +1289,7 @@ , , i64, + iXLen, iXLen); define 
@intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { @@ -1295,6 +1302,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vsmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1302,6 +1310,7 @@ ; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vsmul.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1309,7 +1318,7 @@ %0, %1, i64 %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1361,12 +1370,14 @@ , , , + iXLen, iXLen); define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1374,7 +1385,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -1383,12 +1394,14 @@ , , , + iXLen, iXLen); define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1396,7 +1409,7 @@ %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll @@ -7,12 +7,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -20,7 +21,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -30,13 +31,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -45,7 +46,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -54,12 +55,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -67,7 +69,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -77,13 +79,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -92,7 +94,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -101,12 +103,13 @@ , , , - iXLen); + 
iXLen, iXLen); define @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -114,7 +117,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -124,13 +127,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -139,7 +142,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -148,12 +151,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -162,7 +166,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -172,13 +176,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -187,7 +191,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -196,12 +200,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -210,7 +215,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -220,13 +225,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -235,7 +240,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -244,12 +249,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -258,7 +264,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -268,13 +274,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, 
ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +289,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -292,12 +298,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -315,13 +322,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -330,7 +337,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -339,12 +346,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -352,7 +360,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -362,13 +370,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -377,7 +385,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -386,12 +394,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -400,7 +409,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -410,13 +419,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -425,7 +434,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -434,12 +443,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -448,7 +458,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -458,13 +468,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); 
define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +483,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -482,12 +492,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -496,7 +507,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -506,13 +517,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -521,7 +532,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -530,12 +541,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -543,7 +555,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -553,13 +565,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +580,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -577,12 +589,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -591,7 +604,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -601,13 +614,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -616,7 +629,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -625,12 +638,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, 
ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -639,7 +653,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -649,13 +663,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -664,7 +678,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -673,12 +687,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -687,7 +702,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -697,13 +712,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -712,7 +727,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -720,13 +735,13 @@ declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -734,7 +749,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -744,13 +759,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -759,7 +774,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -767,13 +782,13 @@ declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -781,7 +796,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -791,13 +806,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +821,7 @@ %1, iXLen %2, %3, - 
iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -814,13 +829,13 @@ declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -828,7 +843,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -838,13 +853,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -853,7 +868,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -861,13 +876,13 @@ declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -876,7 +891,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -886,13 +901,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -901,7 +916,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -909,13 +924,13 @@ declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -924,7 +939,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -934,13 +949,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -949,7 +964,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -957,13 +972,13 @@ declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -972,7 +987,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 
0, iXLen %2) ret %a } @@ -982,13 +997,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1012,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1005,13 +1020,13 @@ declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1019,7 +1034,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1029,13 +1044,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1044,7 +1059,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1052,13 +1067,13 @@ declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1066,7 +1081,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1076,13 +1091,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1091,7 +1106,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1099,13 +1114,13 @@ declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1114,7 +1129,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1124,13 +1139,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1139,7 +1154,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1147,13 +1162,13 @@ 
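; Illustrative sketch (not part of the patch) of the masked vx form, matching
; the masked test calls above: (passthru, wide source, scalar shift, mask,
; vxrm, vl, policy). The intrinsic name and the <vscale x ...> types are
; assumed from the test name intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32; the
; policy value iXLen 1 corresponds to the "ta, mu" vsetvli in the CHECK lines.
declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,   ; maskedoff / passthru
  <vscale x 4 x i32>,   ; wide source operand
  iXLen,                ; scalar shift amount
  <vscale x 4 x i1>,    ; mask
  iXLen,                ; vxrm rounding mode (the operand added by this patch)
  iXLen,                ; vl
  iXLen)                ; policy

define <vscale x 4 x i16> @sketch_vnclip_mask_vx(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,           ; scalar shift amount
    <vscale x 4 x i1> %3,
    iXLen 0,            ; rounding mode
    iXLen %4,           ; vl
    iXLen 1)            ; policy: tail agnostic, mask undisturbed
  ret <vscale x 4 x i16> %a
}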
declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1162,7 +1177,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1172,13 +1187,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1202,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1195,13 +1210,13 @@ declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1210,7 +1225,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1220,13 +1235,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1235,7 +1250,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1243,13 +1258,13 @@ declare @llvm.riscv.vnclip.nxv1i32.nxv1i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1257,7 +1272,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1267,13 +1282,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1282,7 +1297,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1290,13 +1305,13 @@ declare @llvm.riscv.vnclip.nxv2i32.nxv2i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1305,7 +1320,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1315,13 
+1330,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1330,7 +1345,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1338,13 +1353,13 @@ declare @llvm.riscv.vnclip.nxv4i32.nxv4i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1353,7 +1368,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1363,13 +1378,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1378,7 +1393,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1386,13 +1401,13 @@ declare @llvm.riscv.vnclip.nxv8i32.nxv8i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1401,7 +1416,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1411,13 +1426,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1426,7 +1441,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1435,6 +1450,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1442,7 +1458,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1451,6 +1467,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1459,7 +1476,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1468,6 +1485,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1475,7 +1493,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1484,6 +1502,7 @@ ; 
CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1492,7 +1511,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1501,6 +1520,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1508,7 +1528,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1517,6 +1537,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1525,7 +1546,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1534,6 +1555,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1542,7 +1564,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1551,6 +1573,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1559,7 +1582,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1568,6 +1591,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1576,7 +1600,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1585,6 +1609,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1618,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1602,6 +1627,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1610,7 +1636,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1619,6 +1645,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1627,7 +1654,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1636,6 +1663,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1643,7 +1671,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1652,6 +1680,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1660,7 +1689,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1669,6 +1698,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1685,6 +1715,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1724,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1702,6 +1733,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1710,7 +1742,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1719,6 +1751,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1760,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1736,6 +1769,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1744,7 +1778,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1753,6 +1787,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1796,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1770,6 +1805,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1778,7 +1814,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1787,6 +1823,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1832,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1804,6 +1841,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1811,7 +1849,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1820,6 +1858,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; 
CHECK-NEXT: ret entry: @@ -1828,7 +1867,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1837,6 +1876,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1845,7 +1885,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1854,6 +1894,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1862,7 +1903,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1871,6 +1912,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1879,7 +1921,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1888,6 +1930,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1896,7 +1939,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1905,6 +1948,7 @@ ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1913,7 +1957,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1922,6 +1966,7 @@ ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1930,7 +1975,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll @@ -7,12 +7,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -20,7 +21,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -30,13 +31,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -45,7 +46,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -54,12 +55,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -67,7 +69,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -77,13 +79,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -92,7 +94,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -101,12 +103,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -114,7 +117,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -124,13 +127,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -139,7 +142,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -148,12 +151,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -162,7 +166,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -172,13 +176,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -187,7 +191,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -196,12 +200,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -210,7 +215,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -220,13 +225,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -235,7 +240,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ 
-244,12 +249,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -258,7 +264,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -268,13 +274,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +289,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -292,12 +298,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -315,13 +322,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -330,7 +337,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -339,12 +346,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -352,7 +360,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -362,13 +370,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -377,7 +385,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -386,12 +394,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -400,7 +409,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -410,13 +419,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -425,7 +434,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -434,12 +443,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -448,7 +458,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -458,13 +468,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +483,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -482,12 +492,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -496,7 +507,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -506,13 +517,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -521,7 +532,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -530,12 +541,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -543,7 +555,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -553,13 +565,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +580,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -577,12 +589,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -591,7 +604,7 @@ undef, %0, 
%1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -601,13 +614,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -616,7 +629,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -625,12 +638,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -639,7 +653,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -649,13 +663,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -664,7 +678,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -673,12 +687,13 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -687,7 +702,7 @@ undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -697,13 +712,13 @@ , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -712,7 +727,7 @@ %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -720,13 +735,13 @@ declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -734,7 +749,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -744,13 +759,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -759,7 +774,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -767,13 +782,13 @@ declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( , , - iXLen, - iXLen); + 
iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -781,7 +796,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -791,13 +806,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +821,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -814,13 +829,13 @@ declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -828,7 +843,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -838,13 +853,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -853,7 +868,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -861,13 +876,13 @@ declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -876,7 +891,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -886,13 +901,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -901,7 +916,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -909,13 +924,13 @@ declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -924,7 +939,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -934,13 +949,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, 
iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -949,7 +964,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -957,13 +972,13 @@ declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -972,7 +987,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -982,13 +997,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1012,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1005,13 +1020,13 @@ declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1019,7 +1034,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1029,13 +1044,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1044,7 +1059,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1052,13 +1067,13 @@ declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1066,7 +1081,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1076,13 +1091,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1091,7 +1106,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1099,13 +1114,13 @@ declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv4i16_nxv4i32( 
%0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1114,7 +1129,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1124,13 +1139,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1139,7 +1154,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1147,13 +1162,13 @@ declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1162,7 +1177,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1172,13 +1187,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1202,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1195,13 +1210,13 @@ declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1210,7 +1225,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1220,13 +1235,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1235,7 +1250,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1243,13 +1258,13 @@ declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1257,7 +1272,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1267,13 +1282,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define 
@intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1282,7 +1297,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1290,13 +1305,13 @@ declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1305,7 +1320,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1315,13 +1330,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1330,7 +1345,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1338,13 +1353,13 @@ declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1353,7 +1368,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1363,13 +1378,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1378,7 +1393,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1386,13 +1401,13 @@ declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64( , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1401,7 +1416,7 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -1411,13 +1426,13 @@ , iXLen, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1426,7 +1441,7 @@ %1, iXLen %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -1435,6 +1450,7 @@ ; CHECK-LABEL: 
intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1442,7 +1458,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1451,6 +1467,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1459,7 +1476,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1468,6 +1485,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1475,7 +1493,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1484,6 +1502,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1492,7 +1511,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1501,6 +1520,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1508,7 +1528,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1517,6 +1537,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1525,7 +1546,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1534,6 +1555,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1542,7 +1564,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1551,6 +1573,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1559,7 +1582,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1568,6 +1591,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1576,7 +1600,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1585,6 +1609,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1618,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1602,6 +1627,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; 
CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1610,7 +1636,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1619,6 +1645,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1627,7 +1654,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1636,6 +1663,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1643,7 +1671,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1652,6 +1680,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1660,7 +1689,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1669,6 +1698,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1685,6 +1715,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1724,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1702,6 +1733,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1710,7 +1742,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1719,6 +1751,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1760,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1736,6 +1769,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1744,7 +1778,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1753,6 +1787,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1796,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1770,6 +1805,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v16, v8, 9 ; 
CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1778,7 +1814,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1787,6 +1823,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1832,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1804,6 +1841,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1811,7 +1849,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1820,6 +1858,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1828,7 +1867,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1837,6 +1876,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1845,7 +1885,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1854,6 +1894,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1862,7 +1903,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1871,6 +1912,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1879,7 +1921,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1888,6 +1930,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1896,7 +1939,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } @@ -1905,6 +1948,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1913,7 +1957,7 @@ undef, %0, iXLen 9, - iXLen %1) + iXLen 0, iXLen %1) ret %a } @@ -1922,6 +1966,7 @@ ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1930,7 +1975,7 @@ %1, iXLen 9, %2, - iXLen %3, iXLen 1) + iXLen 0, iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -10,12 +10,13 @@ , , , - i32); + i32, i32); define 
@intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -23,7 +24,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -33,13 +34,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -48,7 +49,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -57,12 +58,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -70,7 +72,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -80,13 +82,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -95,7 +97,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -104,12 +106,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -117,7 +120,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -127,13 +130,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -142,7 +145,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -151,12 +154,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -164,7 +168,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -174,13 +178,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -189,7 +193,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -198,12 +202,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -211,7 +216,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -221,13 +226,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +241,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -245,12 +250,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -258,7 +264,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -268,13 +274,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +289,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -292,12 +298,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -315,14 +322,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -331,7 +338,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -340,12 +347,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -353,7 +361,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -363,13 +371,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +386,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , - i32); + i32, i32); define 
@intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -400,7 +409,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -410,13 +419,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -425,7 +434,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -434,12 +443,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +457,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -457,13 +467,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +482,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -481,12 +491,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -494,7 +505,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -504,13 +515,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -519,7 +530,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -528,12 +539,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -541,7 +553,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -551,13 +563,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -566,7 +578,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -575,12 +587,13 @@ , , , - 
i32); + i32, i32); define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -588,7 +601,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -598,14 +611,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +627,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -623,12 +636,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -646,13 +660,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -661,7 +675,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -670,12 +684,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -693,13 +708,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -708,7 +723,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -717,12 +732,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -740,13 +756,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -755,7 +771,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 
0, i32 %4, i32 1) ret %a } @@ -764,12 +780,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -787,13 +804,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -802,7 +819,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -811,12 +828,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -834,14 +852,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -850,7 +868,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -859,12 +877,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -872,7 +891,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -882,13 +901,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -897,7 +916,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -906,12 +925,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -919,7 +939,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -929,13 +949,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret 
entry: @@ -944,7 +964,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -953,12 +973,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -966,7 +987,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -976,13 +997,13 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -991,7 +1012,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1000,12 +1021,13 @@ , , , - i32); + i32, i32); define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1013,7 +1035,7 @@ undef, %0, %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1023,14 +1045,14 @@ , , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1039,7 +1061,7 @@ %1, %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1048,12 +1070,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1061,7 +1084,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1071,13 +1094,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1086,7 +1109,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1095,12 +1118,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1108,7 +1132,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1118,13 +1142,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: 
vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1133,7 +1157,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1142,12 +1166,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1155,7 +1180,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1165,13 +1190,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1180,7 +1205,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1189,12 +1214,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1202,7 +1228,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1212,13 +1238,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1227,7 +1253,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1236,12 +1262,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1249,7 +1276,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1259,13 +1286,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1274,7 +1301,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1283,12 +1310,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1296,7 +1324,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1306,13 +1334,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: 
vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1321,7 +1349,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1330,12 +1358,13 @@ , , i8, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1343,7 +1372,7 @@ undef, %0, i8 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1353,13 +1382,13 @@ , i8, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1368,7 +1397,7 @@ %1, i8 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1377,12 +1406,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1390,7 +1420,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1400,13 +1430,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1415,7 +1445,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1424,12 +1454,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1437,7 +1468,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1447,13 +1478,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1462,7 +1493,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1471,12 +1502,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1484,7 +1516,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1494,13 +1526,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1509,7 +1541,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1518,12 +1550,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1531,7 +1564,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1541,13 +1574,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1556,7 +1589,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1565,12 +1598,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1578,7 +1612,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1588,13 +1622,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1603,7 +1637,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1612,12 +1646,13 @@ , , i16, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1625,7 +1660,7 @@ undef, %0, i16 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1635,13 +1670,13 @@ , i16, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1650,7 +1685,7 @@ %1, i16 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1658,13 +1693,13 @@ declare @llvm.riscv.vsmul.nxv1i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1707,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1682,13 +1717,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1697,7 +1732,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1705,13 +1740,13 @@ declare @llvm.riscv.vsmul.nxv2i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1719,7 +1754,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1729,13 +1764,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1779,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1752,13 +1787,13 @@ declare @llvm.riscv.vsmul.nxv4i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1766,7 +1801,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1776,13 +1811,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1791,7 +1826,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1799,13 +1834,13 @@ declare @llvm.riscv.vsmul.nxv8i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1813,7 +1848,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1823,13 +1858,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1838,7 +1873,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1846,13 +1881,13 @@ declare @llvm.riscv.vsmul.nxv16i32.i32( , , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; 
CHECK-NEXT: ret entry: @@ -1860,7 +1895,7 @@ undef, %0, i32 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1870,13 +1905,13 @@ , i32, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1885,7 +1920,7 @@ %1, i32 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1894,7 +1929,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: @@ -1905,6 +1940,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1913,7 +1949,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1923,8 +1959,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: @@ -1935,6 +1970,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1944,7 +1980,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -1953,7 +1989,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: @@ -1964,6 +2000,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1972,7 +2009,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -1982,8 +2019,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: @@ -1994,6 +2030,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2003,7 +2040,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2012,7 +2049,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: @@ -2023,6 +2060,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2031,7 +2069,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2041,8 +2079,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: @@ -2053,6 +2090,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; 
CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2062,7 +2100,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } @@ -2071,7 +2109,7 @@ , , i64, - i32); + i32, i32); define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: @@ -2082,6 +2120,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2090,7 +2129,7 @@ undef, %0, i64 %1, - i32 %2) + i32 0, i32 %2) ret %a } @@ -2100,8 +2139,7 @@ , i64, , - i32, - i32); + i32, i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: @@ -2112,6 +2150,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2121,7 +2160,7 @@ %1, i64 %2, %3, - i32 %4, i32 1) + i32 0, i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -10,12 +10,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -23,7 +24,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -33,13 +34,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -48,7 +49,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -57,12 +58,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -70,7 +72,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -80,13 +82,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -95,7 +97,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -104,12 +106,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: 
vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -117,7 +120,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -127,13 +130,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -142,7 +145,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -151,12 +154,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -164,7 +168,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -174,13 +178,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -189,7 +193,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -198,12 +202,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -211,7 +216,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -221,13 +226,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +241,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -245,12 +250,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -258,7 +264,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -268,13 +274,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +289,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -292,12 +298,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, 
v8, v16 ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -315,14 +322,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -331,7 +338,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -340,12 +347,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -353,7 +361,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -363,13 +371,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +386,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -400,7 +409,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -410,13 +419,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -425,7 +434,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -434,12 +443,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +457,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -457,13 +467,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +482,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -481,12 +491,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: 
csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -494,7 +505,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -504,13 +515,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -519,7 +530,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -528,12 +539,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -541,7 +553,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -551,13 +563,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -566,7 +578,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -575,12 +587,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -588,7 +601,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -598,14 +611,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +627,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -623,12 +636,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -646,13 +660,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -661,7 +675,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -670,12 +684,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -693,13 +708,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -708,7 +723,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -717,12 +732,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -740,13 +756,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -755,7 +771,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -764,12 +780,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -787,13 +804,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -802,7 +819,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -811,12 +828,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -834,14 +852,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -850,7 +868,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -859,12 +877,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -872,7 +891,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -882,13 +901,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -897,7 +916,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -906,12 +925,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -919,7 +939,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -929,13 +949,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +964,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -953,12 +973,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -966,7 +987,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -976,13 +997,13 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -991,7 +1012,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1000,12 +1021,13 @@ , , , - i64); + i64, i64); define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1013,7 +1035,7 @@ undef, %0, %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1023,14 +1045,14 @@ , , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1039,7 +1061,7 @@ %1, %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1048,12 +1070,13 @@ , , i8, - i64); + i64, i64); define 
@intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1061,7 +1084,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1071,13 +1094,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1086,7 +1109,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1095,12 +1118,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1108,7 +1132,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1118,13 +1142,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1133,7 +1157,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1142,12 +1166,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1155,7 +1180,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1165,13 +1190,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1180,7 +1205,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1189,12 +1214,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1202,7 +1228,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1212,13 +1238,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1227,7 +1253,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1236,12 +1262,13 @@ , , i8, - i64); + i64, i64); define 
@intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1249,7 +1276,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1259,13 +1286,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1274,7 +1301,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1283,12 +1310,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1296,7 +1324,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1306,13 +1334,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1321,7 +1349,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1330,12 +1358,13 @@ , , i8, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1343,7 +1372,7 @@ undef, %0, i8 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1353,13 +1382,13 @@ , i8, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1368,7 +1397,7 @@ %1, i8 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1377,12 +1406,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1390,7 +1420,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1400,13 +1430,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1415,7 +1445,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1424,12 +1454,13 @@ , , i16, 
- i64); + i64, i64); define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1437,7 +1468,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1447,13 +1478,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1462,7 +1493,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1471,12 +1502,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1484,7 +1516,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1494,13 +1526,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1509,7 +1541,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1518,12 +1550,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1531,7 +1564,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1541,13 +1574,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1556,7 +1589,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1565,12 +1598,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1578,7 +1612,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1588,13 +1622,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1603,7 +1637,7 @@ %1, i16 %2, %3, - i64 %4, i64 
1) + i64 0, i64 %4, i64 1) ret %a } @@ -1612,12 +1646,13 @@ , , i16, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1625,7 +1660,7 @@ undef, %0, i16 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1635,13 +1670,13 @@ , i16, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1650,7 +1685,7 @@ %1, i16 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1659,12 +1694,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1708,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1682,13 +1718,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1697,7 +1733,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1706,12 +1742,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1719,7 +1756,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1729,13 +1766,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1781,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1753,12 +1790,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1766,7 +1804,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1776,13 +1814,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; 
CHECK-NEXT: ret entry: @@ -1791,7 +1829,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1800,12 +1838,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1813,7 +1852,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1823,13 +1862,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1838,7 +1877,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1847,12 +1886,13 @@ , , i32, - i64); + i64, i64); define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1900,7 @@ undef, %0, i32 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1870,13 +1910,13 @@ , i32, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1885,7 +1925,7 @@ %1, i32 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1893,13 +1933,13 @@ declare @llvm.riscv.vsmul.nxv1i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1907,7 +1947,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1917,13 +1957,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1932,7 +1972,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1940,13 +1980,13 @@ declare @llvm.riscv.vsmul.nxv2i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1954,7 +1994,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -1964,13 +2004,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1979,7 +2019,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -1987,13 +2027,13 @@ declare @llvm.riscv.vsmul.nxv4i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2001,7 +2041,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2011,13 +2051,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2026,7 +2066,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } @@ -2034,13 +2074,13 @@ declare @llvm.riscv.vsmul.nxv8i64.i64( , , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2048,7 +2088,7 @@ undef, %0, i64 %1, - i64 %2) + i64 0, i64 %2) ret %a } @@ -2058,13 +2098,13 @@ , i64, , - i64, - i64); + i64, i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2073,7 +2113,7 @@ %1, i64 %2, %3, - i64 %4, i64 1) + i64 0, i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -1,2479 +1,1235 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vssra.nxv1i8.nxv1i8( - , - , - , - i32); -define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vssra_vv_i8mf8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i8.nxv1i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vssra_vx_i8mf8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv2i8.nxv2i8( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv1i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vssra_vv_i8mf4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i8.nxv2i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vssra_vx_i8mf4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i8.nxv4i8( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv2i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssra_vv_i8mf2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssra_vx_i8mf2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vssra.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i8.nxv8i8( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv4i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssra_vv_i8m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssra_vx_i8m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv16i8.nxv16i8( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv8i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssra_vv_i8m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssra_vx_i8m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv32i8.nxv32i8( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv16i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssra_vv_i8m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssra_vx_i8m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv64i8.nxv64i8( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv32i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssra_vv_i8m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssra_vx_i8m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv64i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv1i16.nxv1i16( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv64i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssra_vv_i16mf4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i16.nxv1i16( - undef, - %0, - %1, 
- i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssra_vx_i16mf4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv2i16.nxv2i16( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv1i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssra_vv_i16mf2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssra_vx_i16mf2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i16.nxv4i16( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv2i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssra_vv_i16m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssra_vx_i16m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i16.nxv8i16( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv4i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssra_vv_i16m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssra_vx_i16m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv16i16.nxv16i16( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv8i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssra_vv_i16m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssra_vx_i16m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx 
v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv32i16.nxv32i16( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv16i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssra_vv_i16m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssra_vx_i16m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv1i32.nxv1i32( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv32i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssra_vv_i32mf2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssra_vx_i32mf2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv2i32.nxv2i32( - , - , - , - i32); +declare 
@llvm.riscv.vssra.nxv1i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssra_vv_i32m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssra_vx_i32m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i32.nxv4i32( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv2i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssra_vv_i32m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssra_vx_i32m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i32.nxv8i32( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv4i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssra_vv_i32m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi 
vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssra_vx_i32m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv16i32.nxv16i32( - , - , - , - i32); +declare @llvm.riscv.vssra.nxv8i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vssra_vv_i32m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssra.nxv1i8( - , - , - i32, - i32); - -define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv1i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv1i8( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssra.nxv2i8( - , - , - i32, - i32); - -define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv2i8( - undef, - %0, - 
i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8: +define @test_vssra_vx_i32m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssra.nxv4i8( - , - , - i32, - i32); - -define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.nxv16i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8: +define @test_vssra_vv_i64m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i8( - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8: +define @test_vssra_vx_i64m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.nxv1i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8: +define @test_vssra_vv_i64m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: 
- %a = call @llvm.riscv.vssra.mask.nxv8i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv16i8( - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8: +define @test_vssra_vx_i64m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.nxv2i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8: +define @test_vssra_vv_i64m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv32i8( - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8: +define @test_vssra_vx_i64m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.nxv4i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8: +define @test_vssra_vv_i64m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv64i8( - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssra_vx_nxv64i8_nxv64i8( %0, 
i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8: +define @test_vssra_vx_i64m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv64i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv64i8( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv1i16( - , - , - i32, - i32); +declare @llvm.riscv.vssra.nxv8i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssra_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16: +define @test_vssra_vv_i8mf8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv1i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv1i16( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssra.nxv2i16( - , - , - i32, - i32); - -define @intrinsic_vssra_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16: +define @test_vssra_vx_i8mf8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call 
@llvm.riscv.vssra.mask.nxv1i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i16( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv1i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16: +define @test_vssra_vv_i8mf4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16: +define @test_vssra_vx_i8mf4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i16( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv2i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16: +define @test_vssra_vv_i8mf2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16: +define @test_vssra_vx_i8mf2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + 
ret %0 } -declare @llvm.riscv.vssra.nxv16i16( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv4i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16: +define @test_vssra_vv_i8m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16: +define @test_vssra_vx_i8m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv32i16( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv8i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16: +define @test_vssra_vv_i8m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16: +define @test_vssra_vx_i8m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv1i32( - , - , - i32, - i32); +declare 
@llvm.riscv.vssra.mask.nxv16i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32: +define @test_vssra_vv_i8m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32: +define @test_vssra_vx_i8m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssra.nxv2i32( - , - , - i32, - i32); - -define @intrinsic_vssra_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv32i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32: +define @test_vssra_vv_i8m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i32( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32: +define @test_vssra_vx_i8m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: 
vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv64i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv64i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32: +define @test_vssra_vv_i16mf4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i32( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32: +define @test_vssra_vx_i16mf4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv1i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32: +define @test_vssra_vv_i16mf2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv16i32( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32: +define @test_vssra_vx_i16mf2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vssra.nxv16i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv2i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32: +define @test_vssra_vv_i16m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv1i64( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64: +define @test_vssra_vx_i16m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv4i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64: +define @test_vssra_vv_i16m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv2i64( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64: +define @test_vssra_vx_i16m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 
= call @llvm.riscv.vssra.mask.nxv8i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv8i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64: +define @test_vssra_vv_i16m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i64( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64: +define @test_vssra_vx_i16m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv16i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64: +define @test_vssra_vv_i16m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i64( - , - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64: +define @test_vssra_vx_i16m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i16.i32.i32( poison, %op1, i32 
%shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssra.mask.nxv32i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64: +define @test_vssra_vv_i32mf2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv1i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8: +define @test_vssra_vx_i32mf2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv2i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8: +define @test_vssra_vv_i32m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vssra.nxv4i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8: +define @test_vssra_vx_i32m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv8i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8: +define @test_vssra_vv_i32m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv16i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8: +define @test_vssra_vx_i32m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv32i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a 
-} +declare @llvm.riscv.vssra.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8: +define @test_vssra_vv_i32m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv64i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16: +define @test_vssra_vx_i32m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16: +define @test_vssra_vv_i32m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define 
@intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16: +define @test_vssra_vx_i32m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16: +define @test_vssra_vv_i64m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16: +define @test_vssra_vx_i64m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define 
@intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv1i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16: +define @test_vssra_vv_i64m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32: +define @test_vssra_vx_i64m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv2i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32: +define @test_vssra_vv_i64m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define 
@intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32: +define @test_vssra_vx_i64m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv4i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32: +define @test_vssra_vv_i64m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32: +define @test_vssra_vx_i64m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define 
@intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -1,2800 +1,1235 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s

-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);

-define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vv_i8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
+  %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 0, i64 %vl)
+  ret <vscale x 1 x i8> %0
 }

-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 immarg, i64)

-define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vx_i8mf8(<vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
+  %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+  ret <vscale x 1 x i8> %0
 }

-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64 immarg, i64)

-define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vv_i8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
+  %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 0, i64 %vl)
+  ret <vscale x 2 x i8> %0
 }

-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 immarg, i64)

-define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vx_i8mf4(<vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf4:
 ;
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i8.nxv4i8( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv2i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssra_vv_i8mf2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssra_vx_i8mf2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i8.nxv8i8( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv4i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssra_vv_i8m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssra_vx_i8m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret 
%0 } -declare @llvm.riscv.vssra.nxv16i8.nxv16i8( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv8i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssra_vv_i8m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssra_vx_i8m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv32i8.nxv32i8( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv16i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssra_vv_i8m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssra_vx_i8m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv64i8.nxv64i8( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv32i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssra_vv_i8m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssra_vx_i8m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv64i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv1i16.nxv1i16( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv64i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssra_vv_i16mf4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssra_vx_i16mf4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv2i16.nxv2i16( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv1i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssra_vv_i16mf2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare 
@llvm.riscv.vssra.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssra_vx_i16mf2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i16.nxv4i16( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv2i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssra_vv_i16m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssra_vx_i16m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i16.nxv8i16( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv4i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssra_vv_i16m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssra_vx_i16m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: 
test_vssra_vx_i16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv16i16.nxv16i16( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv8i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssra_vv_i16m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssra_vx_i16m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv32i16.nxv32i16( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv16i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssra_vv_i16m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssra_vx_i16m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vssra.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv32i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv1i32.nxv1i32( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv32i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssra_vv_i32mf2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssra_vx_i32mf2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv2i32.nxv2i32( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv1i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssra_vv_i32m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssra_vx_i32m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i32.nxv4i32( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv2i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( %0, 
%1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssra_vv_i32m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssra_vx_i32m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i32.nxv8i32( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv4i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssra_vv_i32m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssra_vx_i32m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv16i32.nxv16i32( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv8i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vssra_vv_i32m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vssra.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vssra_vx_i32m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv16i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv1i64.nxv1i64( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv16i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vssra_vv_i64m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vssra_vx_i64m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv1i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv2i64.nxv2i64( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv1i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vssra_vv_i64m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) -define 
@intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vssra_vx_i64m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv2i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv4i64.nxv4i64( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv2i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vssra_vv_i64m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vssra_vx_i64m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.nxv4i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.nxv8i64.nxv8i64( - , - , - , - i64); +declare @llvm.riscv.vssra.nxv4i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vssra_vv_i64m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vssra_vx_i64m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vssra.vv v8, 
v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) + %0 = call @llvm.riscv.vssra.nxv8i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 +} + +declare @llvm.riscv.vssra.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - ret %a +define @test_vssra_vv_i8mf8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf8_m: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %0 = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv1i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8: +define @test_vssra_vx_i8mf8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8: +define @test_vssra_vv_i8mf4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv2i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8: +define @test_vssra_vx_i8mf4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vssra_mask_vx_nxv2i8_nxv2i8: +define @test_vssra_vv_i8mf2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8: +define @test_vssra_vx_i8mf2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8: +define @test_vssra_vv_i8m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8: +define @test_vssra_vx_i8m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8: +define @test_vssra_vv_i8m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; 
CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv16i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8: +define @test_vssra_vx_i8m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv16i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8: +define @test_vssra_vv_i8m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv32i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8: +define @test_vssra_vx_i8m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv32i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8: +define @test_vssra_vv_i8m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vssra.mask.nxv32i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv64i8( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8: +define @test_vssra_vx_i8m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i8m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv64i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv64i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8: +define @test_vssra_vv_i16mf4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv1i16( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16: +define @test_vssra_vx_i16mf4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16: +define @test_vssra_vv_i16mf2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, %op1, %shift, %mask, i64 0, 
i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv2i16( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16: +define @test_vssra_vx_i16mf2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16: +define @test_vssra_vv_i16m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i16( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16: +define @test_vssra_vx_i16m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16: +define @test_vssra_vv_i16m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i16( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) 
-define @intrinsic_vssra_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16: +define @test_vssra_vx_i16m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16: +define @test_vssra_vv_i16m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv16i16( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16: +define @test_vssra_vx_i16m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv16i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16: +define @test_vssra_vv_i16m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv32i16( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16: +define 
@test_vssra_vx_i16m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i16m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv32i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv32i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16: +define @test_vssra_vv_i32mf2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv1i32( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32: +define @test_vssra_vx_i32mf2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32: +define @test_vssra_vv_i32m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv2i32( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32: +define @test_vssra_vx_i32m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32: +define @test_vssra_vv_i32m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i32( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32: +define @test_vssra_vx_i32m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32: +define @test_vssra_vv_i32m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i32( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32: +define @test_vssra_vx_i32m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv8i32( - 
undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32: +define @test_vssra_vv_i32m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv16i32( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32: +define @test_vssra_vx_i32m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i32m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv16i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv16i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32: +define @test_vssra_vv_i64m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv1i64( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64: +define @test_vssra_vx_i64m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } 
-declare @llvm.riscv.vssra.mask.nxv1i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64: +define @test_vssra_vv_i64m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv2i64( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64: +define @test_vssra_vx_i64m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv2i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64: +define @test_vssra_vv_i64m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv4i64( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64: +define @test_vssra_vx_i64m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv4i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define 
@intrinsic_vssra_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64: +define @test_vssra_vv_i64m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vv_i64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssra.nxv8i64( - , - , - i64, - i64); +declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssra_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64: +define @test_vssra_vx_i64m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssra_vx_i64m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vssra.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv8i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv8i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssra.nxv1i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a + %0 = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -define @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv2i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vssra_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv4i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv8i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv16i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv32i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv64i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8( 
- %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv1i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv2i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv4i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv8i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv16i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv32i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv1i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv2i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv4i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv8i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv16i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv1i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv2i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.nxv4i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define 
@intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -1,2479 +1,1235 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vv_u8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
+  %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 0, i32 %vl)
+  ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 immarg, i32)
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vx_u8mf8(<vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
+  %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+  ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssrl_vv_u8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
+  %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 0, i32 %vl)
+  ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
-
, - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vssrl_vx_u8mf4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.nxv4i8( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv2i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssrl_vv_u8mf2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssrl_vx_u8mf2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.nxv8i8( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv4i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssrl_vv_u8m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssrl_vx_u8m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, 
v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.nxv16i8( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv8i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssrl_vv_u8m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssrl_vx_u8m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.nxv32i8( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv16i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssrl_vv_u8m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssrl_vx_u8m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.nxv64i8( - , - , 
- , - i32); +declare @llvm.riscv.vssrl.nxv32i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssrl_vv_u8m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssrl_vx_u8m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv64i8.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.nxv1i16( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv64i8.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssrl_vv_u16mf4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i16.nxv1i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssrl_vx_u16mf4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.nxv2i16( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv1i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssrl_vv_u16mf2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssrl_vx_u16mf2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.nxv4i16( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv2i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssrl_vv_u16m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssrl_vx_u16m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.nxv8i16( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv4i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssrl_vv_u16m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - 
i32); +declare @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssrl_vx_u16m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.nxv16i16( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv8i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssrl_vv_u16m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssrl_vx_u16m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.nxv32i16( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv16i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssrl_vv_u16m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssrl_vx_u16m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i16.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.nxv1i32( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv32i16.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssrl_vv_u32mf2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssrl_vx_u32mf2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.nxv2i32( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv1i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssrl_vv_u32m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssrl_vx_u32m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret 
%a + %0 = call @llvm.riscv.vssrl.nxv2i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.nxv4i32( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv2i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssrl_vv_u32m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssrl_vx_u32m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.nxv8i32( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv4i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssrl_vv_u32m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssrl_vx_u32m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.nxv16i32( - , - , - , - i32); +declare @llvm.riscv.vssrl.nxv8i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32: 
+define @test_vssrl_vv_u32m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv1i8( - , - , - i32, - i32); - -define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv1i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv1i8( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv2i8( - , - , - i32, - i32); - -define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv2i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8: +define @test_vssrl_vx_u32m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv4i8( - , - , - i32, - i32); - -define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i32.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8( - , - , - i32, - , 
- i32, - i32); +declare @llvm.riscv.vssrl.nxv16i32.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8: +define @test_vssrl_vv_u64m1( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i8( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8: +define @test_vssrl_vx_u64m1( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv1i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8: +define @test_vssrl_vv_u64m2( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i8( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8: +define @test_vssrl_vx_u64m2( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv2i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8: +define @test_vssrl_vv_u64m4( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu 
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i8( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8: +define @test_vssrl_vx_u64m4( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv4i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8: +define @test_vssrl_vv_u64m8( %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32( poison, %op1, %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv64i8( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) -define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8: +define @test_vssrl_vx_u64m8( %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv64i8( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv64i8( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i64.i32.i32( poison, %op1, i32 %shift, i32 0, i32 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i16( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.nxv8i64.i32.i32(, , i32, i32 immarg, i32) -define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16: +define @test_vssrl_vv_u8mf8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf8_m: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv1i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv1i16( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv2i16( - , - , - i32, - i32); - -define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16: +define @test_vssrl_vx_u8mf8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i16( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16: +define @test_vssrl_vv_u8mf4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16: +define @test_vssrl_vx_u8mf4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i16( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16: +define @test_vssrl_vv_u8mf2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16: +define @test_vssrl_vx_u8mf2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i16( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16: +define @test_vssrl_vv_u8m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16: +define @test_vssrl_vx_u8m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i16( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16: +define @test_vssrl_vv_u8m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i16( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16: +define @test_vssrl_vx_u8m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i32( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32: +define @test_vssrl_vv_u8m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32: +define @test_vssrl_vx_u8m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) 
- - ret %a -} - -declare @llvm.riscv.vssrl.nxv2i32( - , - , - i32, - i32); - -define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32: +define @test_vssrl_vv_u8m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i32( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32: +define @test_vssrl_vx_u8m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv64i8.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32: +define @test_vssrl_vv_u16mf4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i32( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_vx_nxv8i32_nxv8i32: +define @test_vssrl_vx_u16mf4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32: +define @test_vssrl_vv_u16mf2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i32( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32: +define @test_vssrl_vx_u16mf2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32: +define @test_vssrl_vv_u16m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i64( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64: +define 
@test_vssrl_vx_u16m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64: +define @test_vssrl_vv_u16m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i64( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64: +define @test_vssrl_vx_u16m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64: +define @test_vssrl_vv_u16m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i64( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64: +define @test_vssrl_vx_u16m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: 
test_vssrl_vx_u16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64: +define @test_vssrl_vv_u16m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i64( - , - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64: +define @test_vssrl_vx_u16m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i64( - undef, - %0, - i32 %1, - i32 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i16.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64( - , - , - i32, - , - i32, - i32); +declare @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64: +define @test_vssrl_vv_u32mf2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv1i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 
immarg) -define @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8: +define @test_vssrl_vx_u32mf2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv2i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8: +define @test_vssrl_vv_u32m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv4i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8: +define @test_vssrl_vx_u32m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv8i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) 
nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8: +define @test_vssrl_vv_u32m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv16i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8: +define @test_vssrl_vx_u32m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv32i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8: +define @test_vssrl_vv_u32m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv64i8( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu 
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16: +define @test_vssrl_vx_u32m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16: +define @test_vssrl_vv_u32m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16: +define @test_vssrl_vx_u32m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i32.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, 
v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16: +define @test_vssrl_vv_u64m1_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16: +define @test_vssrl_vx_u64m1_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16: +define @test_vssrl_vv_u64m2_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i16( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32: +define @test_vssrl_vx_u64m2_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32: +define @test_vssrl_vv_u64m4_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32: +define @test_vssrl_vx_u64m4_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vssrl.mask.nxv4i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32: +define @test_vssrl_vv_u64m8_m( %mask, %op1, %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32( poison, %op1, %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(, , , , i32 immarg, i32, i32 immarg) -define @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32: +define @test_vssrl_vx_u64m8_m( %mask, %op1, i32 %shift, i32 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i64.i32.i32( poison, %op1, i32 %shift, %mask, i32 0, i32 %vl, i32 3) + ret %0 } -define @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -1,2800 +1,1235 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( - , - , - , - i64); -define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vssrl_vv_u8mf8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret 
%a + %0 = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: +define @test_vssrl_vx_u8mf8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.nxv2i8( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv1i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vssrl_vv_u8mf4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: +define @test_vssrl_vx_u8mf4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.nxv4i8( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv2i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssrl_vv_u8mf2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: +define @test_vssrl_vx_u8mf2( %op1, i64 
%shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.nxv8i8( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv4i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssrl_vv_u8m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: +define @test_vssrl_vx_u8m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.nxv16i8( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv8i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssrl_vv_u8m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: +define @test_vssrl_vx_u8m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call 
@llvm.riscv.vssrl.nxv16i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.nxv32i8( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv16i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssrl_vv_u8m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: +define @test_vssrl_vx_u8m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.nxv64i8( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv32i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssrl_vv_u8m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: +define @test_vssrl_vx_u8m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv64i8.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.nxv1i16( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv64i8.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16: +define 
@test_vssrl_vv_u16mf4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: +define @test_vssrl_vx_u16mf4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.nxv2i16( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv1i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssrl_vv_u16mf2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: +define @test_vssrl_vx_u16mf2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.nxv4i16( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv2i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssrl_vv_u16m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call 
@llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: +define @test_vssrl_vx_u16m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.nxv8i16( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv4i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssrl_vv_u16m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: +define @test_vssrl_vx_u16m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.nxv16i16( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv8i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssrl_vv_u16m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: +define @test_vssrl_vx_u16m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.nxv32i16( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv16i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssrl_vv_u16m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: +define @test_vssrl_vx_u16m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv32i16.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.nxv1i32( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv32i16.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssrl_vv_u32mf2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: +define @test_vssrl_vx_u32mf2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; 
CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.nxv2i32( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv1i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssrl_vv_u32m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: +define @test_vssrl_vx_u32m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.nxv4i32( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv2i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssrl_vv_u32m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: +define @test_vssrl_vx_u32m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.nxv8i32( - , - , - , - i64); +declare 
@llvm.riscv.vssrl.nxv4i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssrl_vv_u32m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: +define @test_vssrl_vx_u32m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.nxv16i32( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv8i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vssrl_vv_u32m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: +define @test_vssrl_vx_u32m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv16i32.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.nxv1i64( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv16i32.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vssrl_vv_u64m1( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m1: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: +define @test_vssrl_vx_u64m1( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv1i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.nxv2i64( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv1i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vssrl_vv_u64m2( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: +define @test_vssrl_vx_u64m2( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv2i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.nxv4i64( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv2i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vssrl_vv_u64m4( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64( - , - , - , - , - 
i64, - i64); +declare @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: +define @test_vssrl_vx_u64m4( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv4i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.nxv8i64( - , - , - , - i64); +declare @llvm.riscv.vssrl.nxv4i64.i64.i64(, , i64, i64 immarg, i64) -define @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vssrl_vv_u64m8( %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( poison, %op1, %shift, i64 0, i64 %vl) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); +declare @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) -define @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: +define @test_vssrl_vx_u64m8( %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) + %0 = call @llvm.riscv.vssrl.nxv8i64.i64.i64( poison, %op1, i64 %shift, i64 0, i64 %vl) + ret %0 +} + +declare @llvm.riscv.vssrl.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - ret %a +define @test_vssrl_vv_u8mf8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf8_m: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %0 = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8: +define @test_vssrl_vx_u8mf8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = 
call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8: +define @test_vssrl_vv_u8mf4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8: +define @test_vssrl_vx_u8mf4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8: +define @test_vssrl_vv_u8mf2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8: +define @test_vssrl_vx_u8mf2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8( - , - , - i64, - , - i64, - i64); +declare 
@llvm.riscv.vssrl.mask.nxv4i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8: +define @test_vssrl_vv_u8m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8: +define @test_vssrl_vx_u8m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8: +define @test_vssrl_vv_u8m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8: +define @test_vssrl_vx_u8m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8: +define @test_vssrl_vv_u8m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8: +define @test_vssrl_vx_u8m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8: +define @test_vssrl_vv_u8m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u8m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv64i8( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8: +define @test_vssrl_vx_u8m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u8m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv64i8( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8: +define @test_vssrl_vv_u16mf4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i16( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16: +define @test_vssrl_vx_u16mf4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16: +define @test_vssrl_vv_u16mf2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i16( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16: +define @test_vssrl_vx_u16mf2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16: +define @test_vssrl_vv_u16m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, 
v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i16( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16: +define @test_vssrl_vx_u16m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16: +define @test_vssrl_vv_u16m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i16( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16: +define @test_vssrl_vx_u16m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16: +define @test_vssrl_vv_u16m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call 
@llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i16( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16: +define @test_vssrl_vx_u16m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16: +define @test_vssrl_vv_u16m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u16m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv32i16( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16: +define @test_vssrl_vx_u16m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u16m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv32i16( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16: +define @test_vssrl_vv_u32mf2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32mf2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i32( - 
, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32: +define @test_vssrl_vx_u32mf2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32mf2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32: +define @test_vssrl_vv_u32m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i32( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32: +define @test_vssrl_vx_u32m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32: +define @test_vssrl_vv_u32m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i32( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32: +define @test_vssrl_vx_u32m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32: +define @test_vssrl_vv_u32m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i32( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32: +define @test_vssrl_vx_u32m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv8i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32: +define @test_vssrl_vv_u32m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u32m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv16i32( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32: +define @test_vssrl_vx_u32m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u32m8_m: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv16i32( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32: +define @test_vssrl_vv_u64m1_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m1_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv1i64( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64: +define @test_vssrl_vx_u64m1_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m1_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64: +define @test_vssrl_vv_u64m2_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m2_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv2i64( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64: +define @test_vssrl_vx_u64m2_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m2_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, 
v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv2i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64: +define @test_vssrl_vv_u64m4_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m4_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv4i64( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64: +define @test_vssrl_vx_u64m4_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m4_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv4i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64( - , - , - i64, - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64: +define @test_vssrl_vv_u64m8_m( %mask, %op1, %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vv_u64m8_m: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, %op1, %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -declare @llvm.riscv.vssrl.nxv8i64( - , - , - i64, - i64); +declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) -define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64: +define @test_vssrl_vx_u64m8_m( %mask, %op1, i64 %shift, i64 %vl) { +; CHECK-LABEL: test_vssrl_vx_u64m8_m: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vssrl.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv8i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv8i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64( %0, 
%1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 +; CHECK-NEXT: csrwi vxrm, 0 +; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vssrl.nxv1i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a + %0 = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, %op1, i64 %shift, %mask, i64 0, i64 %vl, i64 3) + ret %0 } -define @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv2i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv4i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv8i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv16i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv32i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv64i8( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv1i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv2i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vssrl.mask.nxv2i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv4i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv8i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv16i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv32i16( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv1i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv2i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv4i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv8i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv16i32( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv1i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv2i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv4i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.nxv8i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} +declare @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg)