diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -570,6 +570,17 @@
     let ScalarOperand = 2;
     let VLOperand = 3;
   }
+  // For Saturating binary operations with rounding-mode operand.
+  // The destination vector type is the same as first source vector.
+  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
+  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                   llvm_anyint_ty, LLVMMatchType<2>],
+                  [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 4;
+  }
   // For Saturating binary operations with mask.
   // The destination vector type is the same as first source vector.
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
@@ -582,6 +593,18 @@
     let ScalarOperand = 2;
     let VLOperand = 4;
   }
+  // For Saturating binary operations with mask and rounding-mode operand.
+  // The destination vector type is the same as first source vector.
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+  class RISCVSaturatingBinaryAAXMaskedRoundingMode
+      : Intrinsic<[llvm_anyvector_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                   LLVMMatchType<2>, LLVMMatchType<2>],
+                  [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 5;
+  }
   // For Saturating binary operations.
   // The destination vector type is the same as first source vector.
   // The second source operand matches the destination type or is an XLen scalar.
@@ -1058,6 +1081,10 @@
     def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
   }
+  multiclass RISCVSaturatingBinaryAAXRoundingMode {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
+  }
   multiclass RISCVSaturatingBinaryAAShift {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
@@ -1339,6 +1366,14 @@
   defm vasubu : RISCVSaturatingBinaryAAX;
   defm vasub : RISCVSaturatingBinaryAAX;
 
+  // Note: These intrinsics have an additional `.rm` suffix in their names
+  // compared to the existing ones. They take an extra operand that models the
+  // rounding mode and will replace the existing intrinsics in the next commit.
+  defm vaaddu_rm : RISCVSaturatingBinaryAAXRoundingMode;
+  defm vaadd_rm : RISCVSaturatingBinaryAAXRoundingMode;
+  defm vasubu_rm : RISCVSaturatingBinaryAAXRoundingMode;
+  defm vasub_rm : RISCVSaturatingBinaryAAXRoundingMode;
+
   defm vsmul : RISCVSaturatingBinaryAAX;
 
   defm vssrl : RISCVSaturatingBinaryAAShift;
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -27,6 +27,7 @@
   RISCVFrameLowering.cpp
   RISCVGatherScatterLowering.cpp
   RISCVInsertVSETVLI.cpp
+  RISCVInsertReadWriteCSR.cpp
   RISCVInstrInfo.cpp
   RISCVISelDAGToDAG.cpp
   RISCVISelLowering.cpp
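The new `*_rm` intrinsic classes above model the fixed-point rounding mode as an extra integer operand that must be an immediate (hence the `ImmArg`) and sits directly before `vl`. As a rough illustration only, here is a hypothetical sketch of how a front end might emit the unmasked `llvm.riscv.vaadd.rm` intrinsic with `IRBuilder`; the helper name `emitAveragingAdd` and its parameters are invented for this example, and the overloaded-type list is assumed to follow the class definition above (result vector, second source, XLen integer).

```cpp
// Hypothetical sketch, not part of this patch.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
using namespace llvm;

static Value *emitAveragingAdd(IRBuilder<> &B, Value *Passthru, Value *LHS,
                               Value *RHS, Value *VL, uint64_t VXRM) {
  Type *XLenTy = VL->getType();
  // Operand order mirrors the class comment:
  //   (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  return B.CreateIntrinsic(Intrinsic::riscv_vaadd_rm,
                           {LHS->getType(), RHS->getType(), XLenTy},
                           {Passthru, LHS, RHS,
                            ConstantInt::get(XLenTy, VXRM), VL});
}
```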
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -107,6 +107,9 @@
   // in bits 63:31. Used by the SExtWRemoval pass.
   IsSignExtendingOpWShift = UsesMaskPolicyShift + 1,
   IsSignExtendingOpWMask = 1ULL << IsSignExtendingOpWShift,
+
+  HasRoundModeOpShift = IsSignExtendingOpWShift + 1,
+  HasRoundModeOpMask = 1 << HasRoundModeOpShift,
 };
 
 enum VLMUL : uint8_t {
@@ -164,6 +167,11 @@
   return TSFlags & UsesMaskPolicyMask;
 }
 
+/// \returns true if there is a rounding mode operand for this instruction
+static inline bool hasRoundModeOp(uint64_t TSFlags) {
+  return TSFlags & HasRoundModeOpMask;
+}
+
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
   // This method is only called if we expect to have a VL operand, and all
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -60,6 +60,9 @@
 FunctionPass *createRISCVInsertVSETVLIPass();
 void initializeRISCVInsertVSETVLIPass(PassRegistry &);
 
+FunctionPass *createRISCVInsertReadWriteCSRPass();
+void initializeRISCVInsertReadWriteCSRPass(PassRegistry &);
+
 FunctionPass *createRISCVRedundantCopyEliminationPass();
 void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "MCTargetDesc/RISCVInstPrinter.h"
 #include "MCTargetDesc/RISCVMCExpr.h"
 #include "MCTargetDesc/RISCVTargetStreamer.h"
@@ -646,6 +647,8 @@
     --NumOps;
   if (RISCVII::hasSEWOp(TSFlags))
     --NumOps;
+  if (RISCVII::hasRoundModeOp(TSFlags))
+    --NumOps;
 
   bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
   for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
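The `HasRoundModeOp` TSFlags bit added above is what lets generic code (the asm printer here, and the new pass in the next file) find the rounding-mode operand without knowing each pseudo. A small self-contained sketch of that bookkeeping, restating the operand-index arithmetic under the layout documented in the pass; the constant mirrors `TSFlags{19}` from this patch, and `getRoundModeIdxSketch` is a simplified stand-in for the member function defined below, not LLVM code.

```cpp
// Standalone sketch (not part of the patch) of the TSFlags check and the
// operand-index arithmetic used by the new pass.
#include <cstdint>
#include <optional>

namespace sketch {
constexpr unsigned HasRoundModeOpShift = 19; // matches TSFlags{19} below
constexpr uint64_t HasRoundModeOpMask = 1ULL << HasRoundModeOpShift;

inline bool hasRoundModeOp(uint64_t TSFlags) {
  return TSFlags & HasRoundModeOpMask;
}

// Operands end with ..., rm, vl, sew[, policy], so the rounding-mode
// immediate sits three (or four, with a policy operand) slots from the end.
inline std::optional<unsigned> getRoundModeIdxSketch(uint64_t TSFlags,
                                                     unsigned NumExplicitOps,
                                                     bool HasPolicyOp) {
  if (!hasRoundModeOp(TSFlags))
    return std::nullopt;
  return NumExplicitOps - (HasPolicyOp ? 1 : 0) - 3;
}
} // namespace sketch
```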
diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -0,0 +1,111 @@
+//===-- RISCVInsertReadWriteCSR.cpp - Insert Read/Write of RISC-V CSR -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file implements the machine function pass to insert reads/writes of
+// the CSRs used by RISC-V instructions.
+//
+// Currently the pass naively inserts a write to vxrm before every RVV
+// fixed-point instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-insert-read-write-csr"
+#define RISCV_INSERT_READ_WRITE_CSR_NAME "RISC-V Insert Read/Write CSR Pass"
+
+namespace {
+
+class RISCVInsertReadWriteCSR : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+
+public:
+  static char ID;
+
+  RISCVInsertReadWriteCSR() : MachineFunctionPass(ID) {
+    initializeRISCVInsertReadWriteCSRPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override {
+    return RISCV_INSERT_READ_WRITE_CSR_NAME;
+  }
+
+private:
+  bool emitWriteVXRM(MachineBasicBlock &MBB);
+  std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI);
+};
+
+} // end anonymous namespace
+
+char RISCVInsertReadWriteCSR::ID = 0;
+
+INITIALIZE_PASS(RISCVInsertReadWriteCSR, DEBUG_TYPE,
+                RISCV_INSERT_READ_WRITE_CSR_NAME, false, false)
+
+// Returns the index of the rounding-mode immediate operand, if any; otherwise
+// returns std::nullopt.
+std::optional<unsigned>
+RISCVInsertReadWriteCSR::getRoundModeIdx(const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  if (!RISCVII::hasRoundModeOp(TSFlags))
+    return std::nullopt;
+
+  // The operand order:
+  // -------------------------------------
+  // | n-1 (if any) | n-2 | n-3 | n-4 |
+  // |    policy    | sew | vl  | rm  |
+  // -------------------------------------
+  return MI.getNumExplicitOperands() - RISCVII::hasVecPolicyOp(TSFlags) - 3;
+}
+
+// This function inserts a write to vxrm when encountering an RVV fixed-point
+// instruction.
+bool RISCVInsertReadWriteCSR::emitWriteVXRM(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  for (MachineInstr &MI : MBB) {
+    if (auto RoundModeIdx = getRoundModeIdx(MI)) {
+      Changed = true;
+
+      unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm();
+      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
+          .addImm(VXRMImm);
+      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
+                                              /*IsImp*/ true));
+    }
+  }
+  return Changed;
+}
+
+bool RISCVInsertReadWriteCSR::runOnMachineFunction(MachineFunction &MF) {
+  // Skip if the vector extension is not enabled.
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  if (!ST.hasVInstructions())
+    return false;
+
+  TII = ST.getInstrInfo();
+
+  bool Changed = false;
+
+  for (MachineBasicBlock &MBB : MF)
+    Changed |= emitWriteVXRM(MBB);
+
+  return Changed;
+}
+
+FunctionPass *llvm::createRISCVInsertReadWriteCSRPass() {
+  return new RISCVInsertReadWriteCSR();
+}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -214,6 +214,9 @@
   // in bits 63:31. Used by the SExtWRemoval pass.
bit IsSignExtendingOpW = 0; let TSFlags{18} = IsSignExtendingOpW; + + bit HasRoundModeOp = 0; + let TSFlags{19} = HasRoundModeOp; } // Pseudo instructions diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1113,6 +1113,57 @@ let HasVecPolicyOp = 1; } +class VPseudoBinaryNoMaskRoundingMode : + Pseudo<(outs RetClass:$rd), + (ins Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let Constraints = Constraint; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasRoundModeOp = 1; +} + +class VPseudoBinaryNoMaskTURoundingMode : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasRoundModeOp = 1; +} + +class VPseudoBinaryMaskPolicyRoundingMode : + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, + ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; + let HasRoundModeOp = 1; +} + // Special version of VPseudoBinaryNoMask where we pretend the first source is // tied to the destination. // This allows maskedoff and rs2 to be the same register. @@ -1892,6 +1943,27 @@ } } +multiclass VPseudoBinaryRoundingMode { + let VLMul = MInfo.value in { + defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + def suffix : VPseudoBinaryNoMaskRoundingMode; + def suffix # "_TU" :VPseudoBinaryNoMaskTURoundingMode; + def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + RISCVMaskedPseudo; + } +} + multiclass VPseudoBinaryM; } +multiclass VPseudoBinaryV_VV_RM { + defm _VV : VPseudoBinaryRoundingMode; +} + // Similar to VPseudoBinaryV_VV, but uses MxListF. 
multiclass VPseudoBinaryFV_VV { defm _VV : VPseudoBinary; @@ -1974,6 +2050,10 @@ defm "_VX" : VPseudoBinary; } +multiclass VPseudoBinaryV_VX_RM { + defm "_VX" : VPseudoBinaryRoundingMode; +} + multiclass VPseudoVSLD1_VX { foreach m = MxList in { defvar mx = m.MX; @@ -2494,7 +2574,7 @@ } } -multiclass VPseudoVAALU_VV_VX { +multiclass VPseudoVAALU_VV_VX_RM { foreach m = MxList in { defvar mx = m.MX; defvar WriteVAALUV_MX = !cast("WriteVAALUV_" # mx); @@ -2502,9 +2582,9 @@ defvar ReadVAALUV_MX = !cast("ReadVAALUV_" # mx); defvar ReadVAALUX_MX = !cast("ReadVAALUX_" # mx); - defm "" : VPseudoBinaryV_VV, + defm "" : VPseudoBinaryV_VV_RM, Sched<[WriteVAALUV_MX, ReadVAALUV_MX, ReadVAALUV_MX, ReadVMask]>; - defm "" : VPseudoBinaryV_VX, + defm "" : VPseudoBinaryV_VX_RM, Sched<[WriteVAALUX_MX, ReadVAALUV_MX, ReadVAALUX_MX, ReadVMask]>; } } @@ -3941,6 +4021,49 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew, TU_MU)>; +class VPatBinaryNoMaskTARoundingMode : + Pat<(result_type (!cast(intrinsic_name # "_rm") + (result_type (undef)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + VLOpFrag)), + (!cast(inst) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew)>; + +class VPatBinaryNoMaskTURoundingMode : + Pat<(result_type (!cast(intrinsic_name # "_rm") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + VLOpFrag)), + (!cast(inst#"_TU") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew)>; + + // Same as above but source operands are swapped. class VPatBinaryNoMaskSwapped; +class VPatBinaryMaskTARoundingMode : + Pat<(result_type (!cast(intrinsic_name#"_rm_mask") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, sew, (XLenVT timm:$policy))>; + // Same as above but source operands are swapped. class VPatBinaryMaskSwapped; } +multiclass VPatBinaryTARoundingMode +{ + def : VPatBinaryNoMaskTARoundingMode; + def : VPatBinaryNoMaskTURoundingMode; + def : VPatBinaryMaskTARoundingMode; +} + multiclass VPatBinarySwapped; } +multiclass VPatBinaryV_VV_RM vtilist, bit isSEWAware = 0> { + foreach vti = vtilist in + let Predicates = GetVTypePredicates.Predicates in + defm : VPatBinaryTARoundingMode; +} + multiclass VPatBinaryV_VV_INT vtilist> { foreach vti = vtilist in { @@ -4556,6 +4737,21 @@ } } +multiclass VPatBinaryV_VX_RM vtilist, bit isSEWAware = 0> { + foreach vti = vtilist in { + defvar kind = "V"#vti.ScalarSuffix; + let Predicates = GetVTypePredicates.Predicates in + defm : VPatBinaryTARoundingMode; + } +} + multiclass VPatBinaryV_VX_INT vtilist> { foreach vti = vtilist in @@ -4852,6 +5048,11 @@ : VPatBinaryV_VV, VPatBinaryV_VX; +multiclass VPatBinaryV_VV_VX_RM vtilist, bit isSEWAware = 0> + : VPatBinaryV_VV_RM, + VPatBinaryV_VX_RM; + multiclass VPatBinaryV_VX_VI vtilist> : VPatBinaryV_VX, @@ -5595,12 +5796,10 @@ //===----------------------------------------------------------------------===// // 12.2. 
Vector Single-Width Averaging Add and Subtract //===----------------------------------------------------------------------===// -let Uses = [VXRM], hasSideEffects = 1 in { - defm PseudoVAADDU : VPseudoVAALU_VV_VX; - defm PseudoVAADD : VPseudoVAALU_VV_VX; - defm PseudoVASUBU : VPseudoVAALU_VV_VX; - defm PseudoVASUB : VPseudoVAALU_VV_VX; -} +defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM; +defm PseudoVAADD : VPseudoVAALU_VV_VX_RM; +defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM; +defm PseudoVASUB : VPseudoVAALU_VV_VX_RM; //===----------------------------------------------------------------------===// // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation @@ -6263,10 +6462,14 @@ //===----------------------------------------------------------------------===// // 12.2. Vector Single-Width Averaging Add and Subtract //===----------------------------------------------------------------------===// -defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>; -defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>; -defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>; -defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaaddu", "PseudoVAADDU", + AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasubu", "PseudoVASUBU", + AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasub", "PseudoVASUB", + AllIntegerVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaadd", "PseudoVAADD", + AllIntegerVectors>; //===----------------------------------------------------------------------===// // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -84,6 +84,7 @@ initializeRISCVPreRAExpandPseudoPass(*PR); initializeRISCVExpandPseudoPass(*PR); initializeRISCVInsertVSETVLIPass(*PR); + initializeRISCVInsertReadWriteCSRPass(*PR); initializeRISCVDAGToDAGISelPass(*PR); initializeRISCVInitUndefPass(*PR); } @@ -370,6 +371,7 @@ if (TM->getOptLevel() != CodeGenOpt::None) addPass(createRISCVMergeBaseOffsetOptPass()); addPass(createRISCVInsertVSETVLIPass()); + addPass(createRISCVInsertReadWriteCSRPass()); } void RISCVPassConfig::addOptimizedRegAlloc() { diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll --- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll +++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll @@ -41,6 +41,7 @@ ; CHECK-NEXT: Local Stack Slot Allocation ; CHECK-NEXT: RISC-V Pre-RA pseudo instruction expansion pass ; CHECK-NEXT: RISC-V Insert VSETVLI pass +; CHECK-NEXT: RISC-V Insert Read/Write CSR Pass ; CHECK-NEXT: Eliminate PHI nodes for register allocation ; CHECK-NEXT: Two-Address instruction pass ; CHECK-NEXT: Fast Register Allocator diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll --- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll +++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll @@ -108,6 +108,7 @@ ; CHECK-NEXT: RISC-V Pre-RA pseudo instruction expansion pass ; CHECK-NEXT: RISC-V Merge Base Offset ; CHECK-NEXT: RISC-V Insert VSETVLI pass +; CHECK-NEXT: RISC-V Insert Read/Write CSR Pass ; CHECK-NEXT: Detect Dead Lanes ; CHECK-NEXT: RISC-V init undef pass ; CHECK-NEXT: Process Implicit Definitions diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll 
b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -504,27 +504,29 @@ ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( , , , , iXLen, + iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( undef, %0, %1, %2, - iXLen %3, iXLen 3) + iXLen 1, iXLen %3, iXLen 3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -478,26 +478,28 @@ ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( , , , , iXLen, + iXLen, iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -478,26 +478,28 @@ ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( , , , , iXLen, + iXLen, iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( %0, %1, %2, %3, - iXLen %4, iXLen 2) + iXLen 1, iXLen %4, iXLen 2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -478,26 +478,28 @@ ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( , , , , iXLen, + iXLen, iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( %0, %1, %2, %3, - iXLen %4, iXLen 0) + iXLen 1, iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -101,46 +101,48 @@ ret %a } -declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8( %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } -declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( +declare @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8( %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } @@ -188,46 +190,48 @@ ret %a } -declare @llvm.riscv.vasub.nxv1i8.nxv1i8( +declare @llvm.riscv.vasub.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vasub.rm.nxv1i8.nxv1i8( %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } -declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( +declare @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8( %0, %1, %2, - iXLen %3) + iXLen 0, iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll @@ -3,1855 +3,1935 @@ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define 
@intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i8.nxv1i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( +declare @llvm.riscv.vaadd.rm.nxv2i8.nxv2i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i8.nxv2i8( + %a = call @llvm.riscv.vaadd.rm.nxv2i8.nxv2i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8( +declare @llvm.riscv.vaadd.rm.mask.nxv2i8.nxv2i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i8.nxv2i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( +declare @llvm.riscv.vaadd.rm.nxv4i8.nxv4i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vaadd.rm.nxv4i8.nxv4i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8( +declare @llvm.riscv.vaadd.rm.mask.nxv4i8.nxv4i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i8.nxv4i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( +declare @llvm.riscv.vaadd.rm.nxv8i8.nxv8i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vaadd.rm.nxv8i8.nxv8i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8( +declare 
@llvm.riscv.vaadd.rm.mask.nxv8i8.nxv8i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i8.nxv8i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( +declare @llvm.riscv.vaadd.rm.nxv16i8.nxv16i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vaadd.rm.nxv16i8.nxv16i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8( +declare @llvm.riscv.vaadd.rm.mask.nxv16i8.nxv16i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv16i8.nxv16i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( +declare @llvm.riscv.vaadd.rm.nxv32i8.nxv32i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv32i8.nxv32i8( + %a = call @llvm.riscv.vaadd.rm.nxv32i8.nxv32i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8( +declare @llvm.riscv.vaadd.rm.mask.nxv32i8.nxv32i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv32i8.nxv32i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( +declare @llvm.riscv.vaadd.rm.nxv64i8.nxv64i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv64i8.nxv64i8( + %a = 
call @llvm.riscv.vaadd.rm.nxv64i8.nxv64i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8( +declare @llvm.riscv.vaadd.rm.mask.nxv64i8.nxv64i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv64i8.nxv64i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( +declare @llvm.riscv.vaadd.rm.nxv1i16.nxv1i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i16.nxv1i16( + %a = call @llvm.riscv.vaadd.rm.nxv1i16.nxv1i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16( +declare @llvm.riscv.vaadd.rm.mask.nxv1i16.nxv1i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i16.nxv1i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( +declare @llvm.riscv.vaadd.rm.nxv2i16.nxv2i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vaadd.rm.nxv2i16.nxv2i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16( +declare @llvm.riscv.vaadd.rm.mask.nxv2i16.nxv2i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i16.nxv2i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( +declare @llvm.riscv.vaadd.rm.nxv4i16.nxv4i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vaadd.rm.nxv4i16.nxv4i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16( +declare @llvm.riscv.vaadd.rm.mask.nxv4i16.nxv4i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i16.nxv4i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( +declare @llvm.riscv.vaadd.rm.nxv8i16.nxv8i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vaadd.rm.nxv8i16.nxv8i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16( +declare @llvm.riscv.vaadd.rm.mask.nxv8i16.nxv8i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i16.nxv8i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( +declare @llvm.riscv.vaadd.rm.nxv16i16.nxv16i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vaadd.rm.nxv16i16.nxv16i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16( +declare @llvm.riscv.vaadd.rm.mask.nxv16i16.nxv16i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv16i16.nxv16i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( +declare 
@llvm.riscv.vaadd.rm.nxv32i16.nxv32i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vaadd.rm.nxv32i16.nxv32i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16( +declare @llvm.riscv.vaadd.rm.mask.nxv32i16.nxv32i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv32i16.nxv32i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( +declare @llvm.riscv.vaadd.rm.nxv1i32.nxv1i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vaadd.rm.nxv1i32.nxv1i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32( +declare @llvm.riscv.vaadd.rm.mask.nxv1i32.nxv1i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i32.nxv1i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( +declare @llvm.riscv.vaadd.rm.nxv2i32.nxv2i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vaadd.rm.nxv2i32.nxv2i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32( +declare @llvm.riscv.vaadd.rm.mask.nxv2i32.nxv2i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: 
- %a = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i32.nxv2i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( +declare @llvm.riscv.vaadd.rm.nxv4i32.nxv4i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vaadd.rm.nxv4i32.nxv4i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32( +declare @llvm.riscv.vaadd.rm.mask.nxv4i32.nxv4i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i32.nxv4i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( +declare @llvm.riscv.vaadd.rm.nxv8i32.nxv8i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vaadd.rm.nxv8i32.nxv8i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32( +declare @llvm.riscv.vaadd.rm.mask.nxv8i32.nxv8i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i32.nxv8i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( +declare @llvm.riscv.vaadd.rm.nxv16i32.nxv16i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vaadd.rm.nxv16i32.nxv16i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32( +declare @llvm.riscv.vaadd.rm.mask.nxv16i32.nxv16i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv16i32.nxv16i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( +declare @llvm.riscv.vaadd.rm.nxv1i64.nxv1i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vaadd.rm.nxv1i64.nxv1i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64( +declare @llvm.riscv.vaadd.rm.mask.nxv1i64.nxv1i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i64.nxv1i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( +declare @llvm.riscv.vaadd.rm.nxv2i64.nxv2i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vaadd.rm.nxv2i64.nxv2i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64( +declare @llvm.riscv.vaadd.rm.mask.nxv2i64.nxv2i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i64.nxv2i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( +declare @llvm.riscv.vaadd.rm.nxv4i64.nxv4i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vaadd.rm.nxv4i64.nxv4i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare 
@llvm.riscv.vaadd.mask.nxv4i64.nxv4i64( +declare @llvm.riscv.vaadd.rm.mask.nxv4i64.nxv4i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i64.nxv4i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( +declare @llvm.riscv.vaadd.rm.nxv8i64.nxv8i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vaadd.rm.nxv8i64.nxv8i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64( +declare @llvm.riscv.vaadd.rm.mask.nxv8i64.nxv8i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i64.nxv8i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i8.i8( +declare @llvm.riscv.vaadd.rm.nxv1i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv1i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv1i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i8.i8( +declare @llvm.riscv.vaadd.rm.nxv2i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vaadd.nxv2i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv2i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv2i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i8.i8( +declare @llvm.riscv.vaadd.rm.nxv4i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv4i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv4i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i8.i8( +declare @llvm.riscv.vaadd.rm.nxv8i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv8i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv8i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv16i8.i8( +declare @llvm.riscv.vaadd.rm.nxv16i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vaadd.nxv16i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv16i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv16i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv16i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv16i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv32i8.i8( +declare @llvm.riscv.vaadd.rm.nxv32i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv32i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv32i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv32i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv32i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv32i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv64i8.i8( +declare @llvm.riscv.vaadd.rm.nxv64i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv64i8.i8( + %a = call @llvm.riscv.vaadd.rm.nxv64i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv64i8.i8( +declare @llvm.riscv.vaadd.rm.mask.nxv64i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv64i8.i8( + %a = call @llvm.riscv.vaadd.rm.mask.nxv64i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i16.i16( +declare @llvm.riscv.vaadd.rm.nxv1i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, 
v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i16.i16( + %a = call @llvm.riscv.vaadd.rm.nxv1i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i16.i16( +declare @llvm.riscv.vaadd.rm.mask.nxv1i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i16.i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i16.i16( +declare @llvm.riscv.vaadd.rm.nxv2i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i16.i16( + %a = call @llvm.riscv.vaadd.rm.nxv2i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i16.i16( +declare @llvm.riscv.vaadd.rm.mask.nxv2i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i16.i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i16.i16( +declare @llvm.riscv.vaadd.rm.nxv4i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i16.i16( + %a = call @llvm.riscv.vaadd.rm.nxv4i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i16.i16( +declare @llvm.riscv.vaadd.rm.mask.nxv4i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i16.i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i16.i16( +declare @llvm.riscv.vaadd.rm.nxv8i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i16.i16( + %a = call @llvm.riscv.vaadd.rm.nxv8i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i16.i16( +declare @llvm.riscv.vaadd.rm.mask.nxv8i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i16.i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv16i16.i16( +declare @llvm.riscv.vaadd.rm.nxv16i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv16i16.i16( + %a = call @llvm.riscv.vaadd.rm.nxv16i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i16.i16( +declare @llvm.riscv.vaadd.rm.mask.nxv16i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv16i16.i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv16i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv32i16.i16( +declare @llvm.riscv.vaadd.rm.nxv32i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv32i16.i16( + %a = call @llvm.riscv.vaadd.rm.nxv32i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i16.i16( +declare @llvm.riscv.vaadd.rm.mask.nxv32i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv32i16.i16( + %a = call @llvm.riscv.vaadd.rm.mask.nxv32i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i32.i32( +declare @llvm.riscv.vaadd.rm.nxv1i32.i32( , , i32, - iXLen); + iXLen, iXLen); define 
@intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i32.i32( + %a = call @llvm.riscv.vaadd.rm.nxv1i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i32.i32( +declare @llvm.riscv.vaadd.rm.mask.nxv1i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i32.i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i32.i32( +declare @llvm.riscv.vaadd.rm.nxv2i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i32.i32( + %a = call @llvm.riscv.vaadd.rm.nxv2i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i32.i32( +declare @llvm.riscv.vaadd.rm.mask.nxv2i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i32.i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i32.i32( +declare @llvm.riscv.vaadd.rm.nxv4i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i32.i32( + %a = call @llvm.riscv.vaadd.rm.nxv4i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i32.i32( +declare @llvm.riscv.vaadd.rm.mask.nxv4i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i32.i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare 
@llvm.riscv.vaadd.nxv8i32.i32( +declare @llvm.riscv.vaadd.rm.nxv8i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i32.i32( + %a = call @llvm.riscv.vaadd.rm.nxv8i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i32.i32( +declare @llvm.riscv.vaadd.rm.mask.nxv8i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i32.i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv16i32.i32( +declare @llvm.riscv.vaadd.rm.nxv16i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv16i32.i32( + %a = call @llvm.riscv.vaadd.rm.nxv16i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i32.i32( +declare @llvm.riscv.vaadd.rm.mask.nxv16i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv16i32.i32( + %a = call @llvm.riscv.vaadd.rm.mask.nxv16i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv1i64.i64( +declare @llvm.riscv.vaadd.rm.nxv1i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64: @@ -1862,6 +1942,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1869,24 +1950,25 @@ ; RV64-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv1i64.i64( + %a = call @llvm.riscv.vaadd.rm.nxv1i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i64.i64( +declare @llvm.riscv.vaadd.rm.mask.nxv1i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) 
nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64: @@ -1897,6 +1979,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1904,24 +1987,25 @@ ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v9, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv1i64.i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv2i64.i64( +declare @llvm.riscv.vaadd.rm.nxv2i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64: @@ -1932,6 +2016,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1939,24 +2024,25 @@ ; RV64-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv2i64.i64( + %a = call @llvm.riscv.vaadd.rm.nxv2i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i64.i64( +declare @llvm.riscv.vaadd.rm.mask.nxv2i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64: @@ -1967,6 +2053,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1974,24 +2061,25 @@ ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v10, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv2i64.i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv4i64.i64( +declare @llvm.riscv.vaadd.rm.nxv4i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64: @@ -2002,6 +2090,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2009,24 +2098,25 @@ ; RV64-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv4i64.i64( + %a = call @llvm.riscv.vaadd.rm.nxv4i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i64.i64( +declare 
@llvm.riscv.vaadd.rm.mask.nxv4i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64: @@ -2037,6 +2127,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2044,24 +2135,25 @@ ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v12, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv4i64.i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaadd.nxv8i64.i64( +declare @llvm.riscv.vaadd.rm.nxv8i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64: @@ -2072,6 +2164,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2079,24 +2172,25 @@ ; RV64-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.nxv8i64.i64( + %a = call @llvm.riscv.vaadd.rm.nxv8i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i64.i64( +declare @llvm.riscv.vaadd.rm.mask.nxv8i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64: @@ -2107,6 +2201,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaadd.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2114,15 +2209,16 @@ ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaadd.vx v8, v16, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaadd.mask.nxv8i64.i64( + %a = call @llvm.riscv.vaadd.rm.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll @@ -3,1855 +3,1935 @@ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( +declare @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; 
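; Illustrative sketch, not part of the regenerated test diff: the renamed ".rm"
; intrinsics take the rounding mode as an extra immediate operand placed ahead of
; VL, which is what the added "csrwi vxrm, <imm>" lines in the CHECK output
; correspond to. A minimal unmasked example in this file's iXLen convention
; (%x, %y and %vl are placeholder names; the <vscale x 1 x i8> types are the ones
; implied by the nxv1i8 suffix):

declare <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)

  %r = call <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,   ; tied destination/passthru operand
    <vscale x 1 x i8> %x,      ; first source vector
    <vscale x 1 x i8> %y,      ; second source vector
    iXLen 0,                   ; vxrm immediate (rounding mode)
    iXLen %vl)                 ; VL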
CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i8.nxv1i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i8.nxv1i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( +declare @llvm.riscv.vaaddu.rm.nxv2i8.nxv2i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + %a = call @llvm.riscv.vaaddu.rm.nxv2i8.nxv2i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i8.nxv2i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i8.nxv2i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( +declare @llvm.riscv.vaaddu.rm.nxv4i8.nxv4i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vaaddu.rm.nxv4i8.nxv4i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i8.nxv4i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i8.nxv4i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( +declare @llvm.riscv.vaaddu.rm.nxv8i8.nxv8i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vaaddu.rm.nxv8i8.nxv8i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i8.nxv8i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i8.nxv8i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( +declare @llvm.riscv.vaaddu.rm.nxv16i8.nxv16i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vaaddu.rm.nxv16i8.nxv16i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv16i8.nxv16i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv16i8.nxv16i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( +declare @llvm.riscv.vaaddu.rm.nxv32i8.nxv32i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + %a = call @llvm.riscv.vaaddu.rm.nxv32i8.nxv32i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv32i8.nxv32i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv32i8.nxv32i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare 
@llvm.riscv.vaaddu.nxv64i8.nxv64i8( +declare @llvm.riscv.vaaddu.rm.nxv64i8.nxv64i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + %a = call @llvm.riscv.vaaddu.rm.nxv64i8.nxv64i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv64i8.nxv64i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv64i8.nxv64i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( +declare @llvm.riscv.vaaddu.rm.nxv1i16.nxv1i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + %a = call @llvm.riscv.vaaddu.rm.nxv1i16.nxv1i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i16.nxv1i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i16.nxv1i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( +declare @llvm.riscv.vaaddu.rm.nxv2i16.nxv2i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vaaddu.rm.nxv2i16.nxv2i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i16.nxv2i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; 
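; Illustrative sketch, not part of the regenerated test diff: the masked ".rm"
; variants carry the rounding-mode immediate after the mask operand and before VL
; and the trailing policy immediate, matching the (iXLen 1, iXLen %4, iXLen 1)
; argument order used throughout these masked tests. %merge, %op1, %op2, %mask and
; %vl are placeholder names; the <vscale x 2 x i16> / <vscale x 2 x i1> types
; follow from the nxv2i16 suffix:

  %r = call <vscale x 2 x i16> @llvm.riscv.vaaddu.rm.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %merge, ; merge/passthru operand
    <vscale x 2 x i16> %op1,   ; first source vector
    <vscale x 2 x i16> %op2,   ; second source vector
    <vscale x 2 x i1> %mask,   ; mask operand (applied via v0.t in the CHECK lines)
    iXLen 1,                   ; vxrm immediate (rounding mode)
    iXLen %vl,                 ; VL
    iXLen 1)                   ; policy immediate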
CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i16.nxv2i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( +declare @llvm.riscv.vaaddu.rm.nxv4i16.nxv4i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vaaddu.rm.nxv4i16.nxv4i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i16.nxv4i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i16.nxv4i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( +declare @llvm.riscv.vaaddu.rm.nxv8i16.nxv8i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vaaddu.rm.nxv8i16.nxv8i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i16.nxv8i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i16.nxv8i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( +declare @llvm.riscv.vaaddu.rm.nxv16i16.nxv16i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vaaddu.rm.nxv16i16.nxv16i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv16i16.nxv16i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define 
@intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv16i16.nxv16i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( +declare @llvm.riscv.vaaddu.rm.nxv32i16.nxv32i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vaaddu.rm.nxv32i16.nxv32i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv32i16.nxv32i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv32i16.nxv32i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( +declare @llvm.riscv.vaaddu.rm.nxv1i32.nxv1i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vaaddu.rm.nxv1i32.nxv1i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i32.nxv1i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i32.nxv1i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( +declare @llvm.riscv.vaaddu.rm.nxv2i32.nxv2i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vaaddu.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vaaddu.rm.nxv2i32.nxv2i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i32.nxv2i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i32.nxv2i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( +declare @llvm.riscv.vaaddu.rm.nxv4i32.nxv4i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vaaddu.rm.nxv4i32.nxv4i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i32.nxv4i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i32.nxv4i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( +declare @llvm.riscv.vaaddu.rm.nxv8i32.nxv8i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vaaddu.rm.nxv8i32.nxv8i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i32.nxv8i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i32.nxv8i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( +declare @llvm.riscv.vaaddu.rm.nxv16i32.nxv16i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vaaddu.rm.nxv16i32.nxv16i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv16i32.nxv16i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv16i32.nxv16i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( +declare @llvm.riscv.vaaddu.rm.nxv1i64.nxv1i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vaaddu.rm.nxv1i64.nxv1i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i64.nxv1i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i64.nxv1i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( +declare @llvm.riscv.vaaddu.rm.nxv2i64.nxv2i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vaaddu.rm.nxv2i64.nxv2i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i64.nxv2i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i64.nxv2i64( %0, %1, %2, 
%3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i64.nxv4i64( +declare @llvm.riscv.vaaddu.rm.nxv4i64.nxv4i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vaaddu.rm.nxv4i64.nxv4i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i64.nxv4i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i64.nxv4i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( +declare @llvm.riscv.vaaddu.rm.nxv8i64.nxv8i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vaaddu.rm.nxv8i64.nxv8i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i64.nxv8i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i64.nxv8i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv1i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv1i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: 
csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv2i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv2i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv4i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv4i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv8i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv8i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv16i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv16i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv16i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv16i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv16i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv16i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv16i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv32i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv32i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv32i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv32i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv32i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv32i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv32i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv64i8.i8( +declare @llvm.riscv.vaaddu.rm.nxv64i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv64i8.i8( + %a = call @llvm.riscv.vaaddu.rm.nxv64i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv64i8.i8( +declare @llvm.riscv.vaaddu.rm.mask.nxv64i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv64i8.i8( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv64i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i16.i16( +declare @llvm.riscv.vaaddu.rm.nxv1i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i16.i16( + %a = call @llvm.riscv.vaaddu.rm.nxv1i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i16.i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i16.i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i16.i16( +declare @llvm.riscv.vaaddu.rm.nxv2i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i16.i16( + %a = call @llvm.riscv.vaaddu.rm.nxv2i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i16.i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i16.i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i16.i16( +declare @llvm.riscv.vaaddu.rm.nxv4i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i16.i16( + %a = call @llvm.riscv.vaaddu.rm.nxv4i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i16.i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i16.i16( , , i16, , - iXLen, iXLen); + 
iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i16.i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i16.i16( +declare @llvm.riscv.vaaddu.rm.nxv8i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i16.i16( + %a = call @llvm.riscv.vaaddu.rm.nxv8i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i16.i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i16.i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv16i16.i16( +declare @llvm.riscv.vaaddu.rm.nxv16i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv16i16.i16( + %a = call @llvm.riscv.vaaddu.rm.nxv16i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i16.i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv16i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv16i16.i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv16i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv32i16.i16( +declare @llvm.riscv.vaaddu.rm.nxv32i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv32i16.i16( + %a = call @llvm.riscv.vaaddu.rm.nxv32i16.i16( undef, %0, i16 %1, - iXLen 
%2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i16.i16( +declare @llvm.riscv.vaaddu.rm.mask.nxv32i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv32i16.i16( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv32i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i32.i32( +declare @llvm.riscv.vaaddu.rm.nxv1i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i32.i32( + %a = call @llvm.riscv.vaaddu.rm.nxv1i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i32.i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i32.i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i32.i32( +declare @llvm.riscv.vaaddu.rm.nxv2i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i32.i32( + %a = call @llvm.riscv.vaaddu.rm.nxv2i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i32.i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i32.i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv4i32.i32( +declare @llvm.riscv.vaaddu.rm.nxv4i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 
; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i32.i32( + %a = call @llvm.riscv.vaaddu.rm.nxv4i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i32.i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i32.i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i32.i32( +declare @llvm.riscv.vaaddu.rm.nxv8i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i32.i32( + %a = call @llvm.riscv.vaaddu.rm.nxv8i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i32.i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i32.i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv16i32.i32( +declare @llvm.riscv.vaaddu.rm.nxv16i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv16i32.i32( + %a = call @llvm.riscv.vaaddu.rm.nxv16i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i32.i32( +declare @llvm.riscv.vaaddu.rm.mask.nxv16i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv16i32.i32( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv16i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv1i64.i64( +declare @llvm.riscv.vaaddu.rm.nxv1i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64: @@ -1862,6 +1942,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1869,24 +1950,25 @@ ; RV64-LABEL: intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv1i64.i64( + %a = call @llvm.riscv.vaaddu.rm.nxv1i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i64.i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv1i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64: @@ -1897,6 +1979,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vaaddu.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1904,24 +1987,25 @@ ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vaaddu.vx v8, v9, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv1i64.i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv2i64.i64( +declare @llvm.riscv.vaaddu.rm.nxv2i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64: @@ -1932,6 +2016,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1939,24 +2024,25 @@ ; RV64-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv2i64.i64( + %a = call @llvm.riscv.vaaddu.rm.nxv2i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i64.i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv2i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64: @@ -1967,6 +2053,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vaaddu.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1974,24 +2061,25 @@ ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vaaddu.vx v8, v10, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv2i64.i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare 
@llvm.riscv.vaaddu.nxv4i64.i64( +declare @llvm.riscv.vaaddu.rm.nxv4i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64: @@ -2002,6 +2090,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2009,24 +2098,25 @@ ; RV64-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv4i64.i64( + %a = call @llvm.riscv.vaaddu.rm.nxv4i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i64.i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv4i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64: @@ -2037,6 +2127,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vaaddu.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2044,24 +2135,25 @@ ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vaaddu.vx v8, v12, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.mask.nxv4i64.i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vaaddu.nxv8i64.i64( +declare @llvm.riscv.vaaddu.rm.nxv8i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64: @@ -2072,6 +2164,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vaaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2079,24 +2172,25 @@ ; RV64-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vaaddu.nxv8i64.i64( + %a = call @llvm.riscv.vaaddu.rm.nxv8i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i64.i64( +declare @llvm.riscv.vaaddu.rm.mask.nxv8i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64: @@ -2107,6 +2201,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vaaddu.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2114,15 +2209,16 @@ ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vaaddu.vx v8, v16, a0, v0.t ; RV64-NEXT: ret 
entry: - %a = call @llvm.riscv.vaaddu.mask.nxv8i64.i64( + %a = call @llvm.riscv.vaaddu.rm.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub.ll b/llvm/test/CodeGen/RISCV/rvv/vasub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub.ll @@ -3,1855 +3,1935 @@ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vasub.nxv1i8.nxv1i8( +declare @llvm.riscv.vasub.rm.nxv1i8.nxv1i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vasub.rm.nxv1i8.nxv1i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i8.nxv1i8( +declare @llvm.riscv.vasub.rm.mask.nxv1i8.nxv1i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i8.nxv1i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i8.nxv2i8( +declare @llvm.riscv.vasub.rm.nxv2i8.nxv2i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i8.nxv2i8( + %a = call @llvm.riscv.vasub.rm.nxv2i8.nxv2i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i8.nxv2i8( +declare @llvm.riscv.vasub.rm.mask.nxv2i8.nxv2i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i8.nxv2i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i8.nxv4i8( +declare @llvm.riscv.vasub.rm.nxv4i8.nxv4i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vasub.rm.nxv4i8.nxv4i8( undef, %0, %1, - iXLen 
%2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i8.nxv4i8( +declare @llvm.riscv.vasub.rm.mask.nxv4i8.nxv4i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i8.nxv4i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i8.nxv8i8( +declare @llvm.riscv.vasub.rm.nxv8i8.nxv8i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vasub.rm.nxv8i8.nxv8i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i8.nxv8i8( +declare @llvm.riscv.vasub.rm.mask.nxv8i8.nxv8i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i8.nxv8i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv16i8.nxv16i8( +declare @llvm.riscv.vasub.rm.nxv16i8.nxv16i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vasub.rm.nxv16i8.nxv16i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv16i8.nxv16i8( +declare @llvm.riscv.vasub.rm.mask.nxv16i8.nxv16i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv16i8.nxv16i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv32i8.nxv32i8( +declare @llvm.riscv.vasub.rm.nxv32i8.nxv32i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret 
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.rm.nxv32i8.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
   ret <vscale x 32 x i8> %a
 }

-declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vasub.rm.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  iXLen, iXLen);
+  iXLen, iXLen, iXLen);

 define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    csrwi vxrm, 1
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.rm.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 1, iXLen %4, iXLen 1)
   ret <vscale x 32 x i8> %a
 }

-declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vasub.rm.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  iXLen);
+  iXLen, iXLen);

 define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.rm.nxv64i8.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
   ret <vscale x 64 x i8> %a
 }

-declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vasub.rm.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  iXLen, iXLen);
+  iXLen, iXLen, iXLen);

 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    csrwi vxrm, 1
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.rm.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 1, iXLen %4, iXLen 1)
   ret <vscale x 64 x i8> %a
 }

-declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vasub.rm.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  iXLen);
+  iXLen, iXLen);

 define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.rm.nxv1i16.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
   ret <vscale x 1 x i16> %a
 }

-declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vasub.rm.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  iXLen, iXLen);
+  iXLen, iXLen, iXLen);

 define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    csrwi vxrm, 1
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.rm.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 1, iXLen %4, iXLen 1)
   ret <vscale x 1 x i16> %a
 }

-declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vasub.rm.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  iXLen);
+  iXLen, iXLen);

 define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ;
CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vasub.rm.nxv2i16.nxv2i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i16.nxv2i16( +declare @llvm.riscv.vasub.rm.mask.nxv2i16.nxv2i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i16.nxv2i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i16.nxv4i16( +declare @llvm.riscv.vasub.rm.nxv4i16.nxv4i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vasub.rm.nxv4i16.nxv4i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i16.nxv4i16( +declare @llvm.riscv.vasub.rm.mask.nxv4i16.nxv4i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i16.nxv4i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i16.nxv8i16( +declare @llvm.riscv.vasub.rm.nxv8i16.nxv8i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vasub.rm.nxv8i16.nxv8i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i16.nxv8i16( +declare @llvm.riscv.vasub.rm.mask.nxv8i16.nxv8i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i16.nxv8i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare 
@llvm.riscv.vasub.nxv16i16.nxv16i16( +declare @llvm.riscv.vasub.rm.nxv16i16.nxv16i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vasub.rm.nxv16i16.nxv16i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv16i16.nxv16i16( +declare @llvm.riscv.vasub.rm.mask.nxv16i16.nxv16i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv16i16.nxv16i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv32i16.nxv32i16( +declare @llvm.riscv.vasub.rm.nxv32i16.nxv32i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vasub.rm.nxv32i16.nxv32i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv32i16.nxv32i16( +declare @llvm.riscv.vasub.rm.mask.nxv32i16.nxv32i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv32i16.nxv32i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv1i32.nxv1i32( +declare @llvm.riscv.vasub.rm.nxv1i32.nxv1i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vasub.rm.nxv1i32.nxv1i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i32.nxv1i32( +declare @llvm.riscv.vasub.rm.mask.nxv1i32.nxv1i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: 
csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i32.nxv1i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i32.nxv2i32( +declare @llvm.riscv.vasub.rm.nxv2i32.nxv2i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vasub.rm.nxv2i32.nxv2i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i32.nxv2i32( +declare @llvm.riscv.vasub.rm.mask.nxv2i32.nxv2i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i32.nxv2i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i32.nxv4i32( +declare @llvm.riscv.vasub.rm.nxv4i32.nxv4i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vasub.rm.nxv4i32.nxv4i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i32.nxv4i32( +declare @llvm.riscv.vasub.rm.mask.nxv4i32.nxv4i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i32.nxv4i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i32.nxv8i32( +declare @llvm.riscv.vasub.rm.nxv8i32.nxv8i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vasub.rm.nxv8i32.nxv8i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i32.nxv8i32( +declare @llvm.riscv.vasub.rm.mask.nxv8i32.nxv8i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i32.nxv8i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv16i32.nxv16i32( +declare @llvm.riscv.vasub.rm.nxv16i32.nxv16i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vasub.rm.nxv16i32.nxv16i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv16i32.nxv16i32( +declare @llvm.riscv.vasub.rm.mask.nxv16i32.nxv16i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv16i32.nxv16i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv1i64.nxv1i64( +declare @llvm.riscv.vasub.rm.nxv1i64.nxv1i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vasub.rm.nxv1i64.nxv1i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i64.nxv1i64( +declare @llvm.riscv.vasub.rm.mask.nxv1i64.nxv1i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i64.nxv1i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i64.nxv2i64( +declare @llvm.riscv.vasub.rm.nxv2i64.nxv2i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vasub.rm.nxv2i64.nxv2i64( undef, %0, %1, - iXLen %2) + iXLen 0, 
iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i64.nxv2i64( +declare @llvm.riscv.vasub.rm.mask.nxv2i64.nxv2i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i64.nxv2i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i64.nxv4i64( +declare @llvm.riscv.vasub.rm.nxv4i64.nxv4i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vasub.rm.nxv4i64.nxv4i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i64.nxv4i64( +declare @llvm.riscv.vasub.rm.mask.nxv4i64.nxv4i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i64.nxv4i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i64.nxv8i64( +declare @llvm.riscv.vasub.rm.nxv8i64.nxv8i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vasub.rm.nxv8i64.nxv8i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i64.nxv8i64( +declare @llvm.riscv.vasub.rm.mask.nxv8i64.nxv8i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i64.nxv8i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv1i8.i8( +declare @llvm.riscv.vasub.rm.nxv1i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; 
CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv1i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv1i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i8.i8( +declare @llvm.riscv.vasub.rm.nxv2i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv2i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv2i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i8.i8( +declare @llvm.riscv.vasub.rm.nxv4i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv4i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv4i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i8.i8( +declare @llvm.riscv.vasub.rm.nxv8i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: 
vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv8i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv8i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv16i8.i8( +declare @llvm.riscv.vasub.rm.nxv16i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv16i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv16i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv16i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv16i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv16i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv16i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv32i8.i8( +declare @llvm.riscv.vasub.rm.nxv32i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv32i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv32i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv32i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv32i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv32i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv32i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv64i8.i8( +declare @llvm.riscv.vasub.rm.nxv64i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi 
vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv64i8.i8( + %a = call @llvm.riscv.vasub.rm.nxv64i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv64i8.i8( +declare @llvm.riscv.vasub.rm.mask.nxv64i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv64i8.i8( + %a = call @llvm.riscv.vasub.rm.mask.nxv64i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv1i16.i16( +declare @llvm.riscv.vasub.rm.nxv1i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i16.i16( + %a = call @llvm.riscv.vasub.rm.nxv1i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i16.i16( +declare @llvm.riscv.vasub.rm.mask.nxv1i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i16.i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i16.i16( +declare @llvm.riscv.vasub.rm.nxv2i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i16.i16( + %a = call @llvm.riscv.vasub.rm.nxv2i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i16.i16( +declare @llvm.riscv.vasub.rm.mask.nxv2i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i16.i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i16.i16( +declare @llvm.riscv.vasub.rm.nxv4i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i16.i16( + %a = call @llvm.riscv.vasub.rm.nxv4i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i16.i16( +declare @llvm.riscv.vasub.rm.mask.nxv4i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i16.i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i16.i16( +declare @llvm.riscv.vasub.rm.nxv8i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i16.i16( + %a = call @llvm.riscv.vasub.rm.nxv8i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i16.i16( +declare @llvm.riscv.vasub.rm.mask.nxv8i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i16.i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv16i16.i16( +declare @llvm.riscv.vasub.rm.nxv16i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv16i16.i16( + %a = call @llvm.riscv.vasub.rm.nxv16i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv16i16.i16( +declare @llvm.riscv.vasub.rm.mask.nxv16i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv16i16.i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv16i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv32i16.i16( +declare @llvm.riscv.vasub.rm.nxv32i16.i16( , , i16, - iXLen); + iXLen, iXLen); define 
@intrinsic_vasub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv32i16.i16( + %a = call @llvm.riscv.vasub.rm.nxv32i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv32i16.i16( +declare @llvm.riscv.vasub.rm.mask.nxv32i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv32i16.i16( + %a = call @llvm.riscv.vasub.rm.mask.nxv32i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv1i32.i32( +declare @llvm.riscv.vasub.rm.nxv1i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i32.i32( + %a = call @llvm.riscv.vasub.rm.nxv1i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i32.i32( +declare @llvm.riscv.vasub.rm.mask.nxv1i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i32.i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i32.i32( +declare @llvm.riscv.vasub.rm.nxv2i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i32.i32( + %a = call @llvm.riscv.vasub.rm.nxv2i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i32.i32( +declare @llvm.riscv.vasub.rm.mask.nxv2i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i32.i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare 
@llvm.riscv.vasub.nxv4i32.i32( +declare @llvm.riscv.vasub.rm.nxv4i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i32.i32( + %a = call @llvm.riscv.vasub.rm.nxv4i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i32.i32( +declare @llvm.riscv.vasub.rm.mask.nxv4i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i32.i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i32.i32( +declare @llvm.riscv.vasub.rm.nxv8i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i32.i32( + %a = call @llvm.riscv.vasub.rm.nxv8i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i32.i32( +declare @llvm.riscv.vasub.rm.mask.nxv8i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv8i32.i32( + %a = call @llvm.riscv.vasub.rm.mask.nxv8i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv16i32.i32( +declare @llvm.riscv.vasub.rm.nxv16i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv16i32.i32( + %a = call @llvm.riscv.vasub.rm.nxv16i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv16i32.i32( +declare @llvm.riscv.vasub.rm.mask.nxv16i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv16i32.i32( + %a = call 
@llvm.riscv.vasub.rm.mask.nxv16i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv1i64.i64( +declare @llvm.riscv.vasub.rm.nxv1i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: @@ -1862,6 +1942,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1869,24 +1950,25 @@ ; RV64-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv1i64.i64( + %a = call @llvm.riscv.vasub.rm.nxv1i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv1i64.i64( +declare @llvm.riscv.vasub.rm.mask.nxv1i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: @@ -1897,6 +1979,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasub.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1904,24 +1987,25 @@ ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vasub.vx v8, v9, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv1i64.i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv2i64.i64( +declare @llvm.riscv.vasub.rm.nxv2i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: @@ -1932,6 +2016,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1939,24 +2024,25 @@ ; RV64-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv2i64.i64( + %a = call @llvm.riscv.vasub.rm.nxv2i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv2i64.i64( +declare @llvm.riscv.vasub.rm.mask.nxv2i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: @@ -1967,6 +2053,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasub.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1974,24 +2061,25 @@ ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, 
ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vasub.vx v8, v10, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv2i64.i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv4i64.i64( +declare @llvm.riscv.vasub.rm.nxv4i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: @@ -2002,6 +2090,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2009,24 +2098,25 @@ ; RV64-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv4i64.i64( + %a = call @llvm.riscv.vasub.rm.nxv4i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv4i64.i64( +declare @llvm.riscv.vasub.rm.mask.nxv4i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: @@ -2037,6 +2127,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasub.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2044,24 +2135,25 @@ ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vasub.vx v8, v12, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.mask.nxv4i64.i64( + %a = call @llvm.riscv.vasub.rm.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasub.nxv8i64.i64( +declare @llvm.riscv.vasub.rm.nxv8i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: @@ -2072,6 +2164,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2079,24 +2172,25 @@ ; RV64-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasub.nxv8i64.i64( + %a = call @llvm.riscv.vasub.rm.nxv8i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasub.mask.nxv8i64.i64( +declare @llvm.riscv.vasub.rm.mask.nxv8i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64: @@ -2107,6 +2201,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasub.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 ; 
RV32-NEXT: ret
@@ -2114,15 +2209,16 @@
; RV64-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: csrwi vxrm, 1
; RV64-NEXT: vasub.vx v8, v16, a0, v0.t
; RV64-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vasub.rm.mask.nxv8i64.i64(
  <vscale x 8 x i64> %0,
  <vscale x 8 x i64> %1,
  i64 %2,
  <vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 1, iXLen %4, iXLen 1)
  ret <vscale x 8 x i64> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll
@@ -3,1855 +3,1935 @@
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

-declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
- iXLen);
+ iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vasubu.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8> undef,
  <vscale x 1 x i8> %0,
  <vscale x 1 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
  ret <vscale x 1 x i8> %a
}

-declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.rm.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
- iXLen, iXLen);
+ iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 1
; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
+ %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.rm.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8> %0,
  <vscale x 1 x i8> %1,
  <vscale x 1 x i8> %2,
  <vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 1, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

-declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.rm.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
- iXLen);
+ iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vasubu.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+ %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.rm.nxv2i8.nxv2i8(
  <vscale x 2 x i8> undef,
  <vscale x 2 x i8> %0,
  <vscale x 2 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
  ret <vscale x 2 x i8> %a
}

-declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.rm.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
- iXLen, iXLen);
+ iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 1
; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
+ %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.rm.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8> %0,
  <vscale x 2 x i8> %1,
  <vscale x 2 x i8> %2,
  <vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 1, iXLen %4, iXLen 1)
  ret <vscale x 2 x i8> %a
}

-declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.rm.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
- iXLen);
+ iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL:
intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vasubu.rm.nxv4i8.nxv4i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8( +declare @llvm.riscv.vasubu.rm.mask.nxv4i8.nxv4i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i8.nxv4i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i8.nxv8i8( +declare @llvm.riscv.vasubu.rm.nxv8i8.nxv8i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vasubu.rm.nxv8i8.nxv8i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8( +declare @llvm.riscv.vasubu.rm.mask.nxv8i8.nxv8i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i8.nxv8i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv16i8.nxv16i8( +declare @llvm.riscv.vasubu.rm.nxv16i8.nxv16i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vasubu.rm.nxv16i8.nxv16i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8( +declare @llvm.riscv.vasubu.rm.mask.nxv16i8.nxv16i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv16i8.nxv16i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv32i8.nxv32i8( +declare 
@llvm.riscv.vasubu.rm.nxv32i8.nxv32i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv32i8.nxv32i8( + %a = call @llvm.riscv.vasubu.rm.nxv32i8.nxv32i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8( +declare @llvm.riscv.vasubu.rm.mask.nxv32i8.nxv32i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv32i8.nxv32i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv64i8.nxv64i8( +declare @llvm.riscv.vasubu.rm.nxv64i8.nxv64i8( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv64i8.nxv64i8( + %a = call @llvm.riscv.vasubu.rm.nxv64i8.nxv64i8( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8( +declare @llvm.riscv.vasubu.rm.mask.nxv64i8.nxv64i8( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv64i8.nxv64i8( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i16.nxv1i16( +declare @llvm.riscv.vasubu.rm.nxv1i16.nxv1i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i16.nxv1i16( + %a = call @llvm.riscv.vasubu.rm.nxv1i16.nxv1i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16( +declare @llvm.riscv.vasubu.rm.mask.nxv1i16.nxv1i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret 
entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i16.nxv1i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv2i16.nxv2i16( +declare @llvm.riscv.vasubu.rm.nxv2i16.nxv2i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vasubu.rm.nxv2i16.nxv2i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16( +declare @llvm.riscv.vasubu.rm.mask.nxv2i16.nxv2i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i16.nxv2i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i16.nxv4i16( +declare @llvm.riscv.vasubu.rm.nxv4i16.nxv4i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vasubu.rm.nxv4i16.nxv4i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16( +declare @llvm.riscv.vasubu.rm.mask.nxv4i16.nxv4i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i16.nxv4i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i16.nxv8i16( +declare @llvm.riscv.vasubu.rm.nxv8i16.nxv8i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vasubu.rm.nxv8i16.nxv8i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16( +declare @llvm.riscv.vasubu.rm.mask.nxv8i16.nxv8i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i16.nxv8i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv16i16.nxv16i16( +declare @llvm.riscv.vasubu.rm.nxv16i16.nxv16i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vasubu.rm.nxv16i16.nxv16i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16( +declare @llvm.riscv.vasubu.rm.mask.nxv16i16.nxv16i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv16i16.nxv16i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv32i16.nxv32i16( +declare @llvm.riscv.vasubu.rm.nxv32i16.nxv32i16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vasubu.rm.nxv32i16.nxv32i16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16( +declare @llvm.riscv.vasubu.rm.mask.nxv32i16.nxv32i16( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv32i16.nxv32i16( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i32.nxv1i32( +declare @llvm.riscv.vasubu.rm.nxv1i32.nxv1i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i32.nxv1i32( + %a = call 
@llvm.riscv.vasubu.rm.nxv1i32.nxv1i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32( +declare @llvm.riscv.vasubu.rm.mask.nxv1i32.nxv1i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i32.nxv1i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv2i32.nxv2i32( +declare @llvm.riscv.vasubu.rm.nxv2i32.nxv2i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vasubu.rm.nxv2i32.nxv2i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32( +declare @llvm.riscv.vasubu.rm.mask.nxv2i32.nxv2i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i32.nxv2i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i32.nxv4i32( +declare @llvm.riscv.vasubu.rm.nxv4i32.nxv4i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vasubu.rm.nxv4i32.nxv4i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32( +declare @llvm.riscv.vasubu.rm.mask.nxv4i32.nxv4i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i32.nxv4i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i32.nxv8i32( +declare @llvm.riscv.vasubu.rm.nxv8i32.nxv8i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vasubu.rm.nxv8i32.nxv8i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32( +declare @llvm.riscv.vasubu.rm.mask.nxv8i32.nxv8i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i32.nxv8i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv16i32.nxv16i32( +declare @llvm.riscv.vasubu.rm.nxv16i32.nxv16i32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vasubu.rm.nxv16i32.nxv16i32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32( +declare @llvm.riscv.vasubu.rm.mask.nxv16i32.nxv16i32( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv16i32.nxv16i32( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i64.nxv1i64( +declare @llvm.riscv.vasubu.rm.nxv1i64.nxv1i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vasubu.rm.nxv1i64.nxv1i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64( +declare @llvm.riscv.vasubu.rm.mask.nxv1i64.nxv1i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i64.nxv1i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) 
ret %a } -declare @llvm.riscv.vasubu.nxv2i64.nxv2i64( +declare @llvm.riscv.vasubu.rm.nxv2i64.nxv2i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vasubu.rm.nxv2i64.nxv2i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64( +declare @llvm.riscv.vasubu.rm.mask.nxv2i64.nxv2i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i64.nxv2i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i64.nxv4i64( +declare @llvm.riscv.vasubu.rm.nxv4i64.nxv4i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vasubu.rm.nxv4i64.nxv4i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64( +declare @llvm.riscv.vasubu.rm.mask.nxv4i64.nxv4i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i64.nxv4i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i64.nxv8i64( +declare @llvm.riscv.vasubu.rm.nxv8i64.nxv8i64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vasubu.rm.nxv8i64.nxv8i64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64( +declare @llvm.riscv.vasubu.rm.mask.nxv8i64.nxv8i64( , , , , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: 
csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i64.nxv8i64( %0, %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i8.i8( +declare @llvm.riscv.vasubu.rm.nxv1i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv1i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv1i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv2i8.i8( +declare @llvm.riscv.vasubu.rm.nxv2i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv2i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv2i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i8.i8( +declare @llvm.riscv.vasubu.rm.nxv4i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv4i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv4i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i8.i8( +declare @llvm.riscv.vasubu.rm.nxv8i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv8i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv8i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv16i8.i8( +declare @llvm.riscv.vasubu.rm.nxv16i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv16i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv16i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv16i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv16i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv16i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv32i8.i8( +declare @llvm.riscv.vasubu.rm.nxv32i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv32i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv32i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv32i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv32i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv32i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv64i8.i8( +declare @llvm.riscv.vasubu.rm.nxv64i8.i8( , , i8, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv64i8.i8( + %a = call @llvm.riscv.vasubu.rm.nxv64i8.i8( undef, %0, i8 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv64i8.i8( +declare @llvm.riscv.vasubu.rm.mask.nxv64i8.i8( , , i8, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv64i8.i8( + %a = call @llvm.riscv.vasubu.rm.mask.nxv64i8.i8( %0, %1, i8 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i16.i16( +declare @llvm.riscv.vasubu.rm.nxv1i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i16.i16( + %a = call @llvm.riscv.vasubu.rm.nxv1i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i16.i16( +declare @llvm.riscv.vasubu.rm.mask.nxv1i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i16.i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv2i16.i16( +declare @llvm.riscv.vasubu.rm.nxv2i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i16.i16( + %a = call @llvm.riscv.vasubu.rm.nxv2i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i16.i16( +declare @llvm.riscv.vasubu.rm.mask.nxv2i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, 
iXLen); define @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i16.i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i16.i16( +declare @llvm.riscv.vasubu.rm.nxv4i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i16.i16( + %a = call @llvm.riscv.vasubu.rm.nxv4i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i16.i16( +declare @llvm.riscv.vasubu.rm.mask.nxv4i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i16.i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i16.i16( +declare @llvm.riscv.vasubu.rm.nxv8i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i16.i16( + %a = call @llvm.riscv.vasubu.rm.nxv8i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i16.i16( +declare @llvm.riscv.vasubu.rm.mask.nxv8i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i16.i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv16i16.i16( +declare @llvm.riscv.vasubu.rm.nxv16i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv16i16.i16( + %a = call @llvm.riscv.vasubu.rm.nxv16i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret 
%a } -declare @llvm.riscv.vasubu.mask.nxv16i16.i16( +declare @llvm.riscv.vasubu.rm.mask.nxv16i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv16i16.i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv16i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv32i16.i16( +declare @llvm.riscv.vasubu.rm.nxv32i16.i16( , , i16, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv32i16.i16( + %a = call @llvm.riscv.vasubu.rm.nxv32i16.i16( undef, %0, i16 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i16.i16( +declare @llvm.riscv.vasubu.rm.mask.nxv32i16.i16( , , i16, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv32i16.i16( + %a = call @llvm.riscv.vasubu.rm.mask.nxv32i16.i16( %0, %1, i16 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i32.i32( +declare @llvm.riscv.vasubu.rm.nxv1i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i32.i32( + %a = call @llvm.riscv.vasubu.rm.nxv1i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i32.i32( +declare @llvm.riscv.vasubu.rm.mask.nxv1i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i32.i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv2i32.i32( +declare @llvm.riscv.vasubu.rm.nxv2i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; 
CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i32.i32( + %a = call @llvm.riscv.vasubu.rm.nxv2i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i32.i32( +declare @llvm.riscv.vasubu.rm.mask.nxv2i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i32.i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i32.i32( +declare @llvm.riscv.vasubu.rm.nxv4i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i32.i32( + %a = call @llvm.riscv.vasubu.rm.nxv4i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i32.i32( +declare @llvm.riscv.vasubu.rm.mask.nxv4i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i32.i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i32.i32( +declare @llvm.riscv.vasubu.rm.nxv8i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv8i32.i32( + %a = call @llvm.riscv.vasubu.rm.nxv8i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i32.i32( +declare @llvm.riscv.vasubu.rm.mask.nxv8i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv8i32.i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv8i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv16i32.i32( +declare @llvm.riscv.vasubu.rm.nxv16i32.i32( , , i32, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32: 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: csrwi vxrm, 0 ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv16i32.i32( + %a = call @llvm.riscv.vasubu.rm.nxv16i32.i32( undef, %0, i32 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i32.i32( +declare @llvm.riscv.vasubu.rm.mask.nxv16i32.i32( , , i32, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: csrwi vxrm, 1 ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv16i32.i32( + %a = call @llvm.riscv.vasubu.rm.mask.nxv16i32.i32( %0, %1, i32 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv1i64.i64( +declare @llvm.riscv.vasubu.rm.nxv1i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64: @@ -1862,6 +1942,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1869,24 +1950,25 @@ ; RV64-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv1i64.i64( + %a = call @llvm.riscv.vasubu.rm.nxv1i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i64.i64( +declare @llvm.riscv.vasubu.rm.mask.nxv1i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64: @@ -1897,6 +1979,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasubu.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1904,24 +1987,25 @@ ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vasubu.vx v8, v9, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv1i64.i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv2i64.i64( +declare @llvm.riscv.vasubu.rm.nxv2i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64: @@ -1932,6 +2016,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1939,24 +2024,25 @@ ; RV64-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: 
vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv2i64.i64( + %a = call @llvm.riscv.vasubu.rm.nxv2i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i64.i64( +declare @llvm.riscv.vasubu.rm.mask.nxv2i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64: @@ -1967,6 +2053,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasubu.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1974,24 +2061,25 @@ ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vasubu.vx v8, v10, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv2i64.i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv4i64.i64( +declare @llvm.riscv.vasubu.rm.nxv4i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64: @@ -2002,6 +2090,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2009,24 +2098,25 @@ ; RV64-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: csrwi vxrm, 0 ; RV64-NEXT: vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.nxv4i64.i64( + %a = call @llvm.riscv.vasubu.rm.nxv4i64.i64( undef, %0, i64 %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i64.i64( +declare @llvm.riscv.vasubu.rm.mask.nxv4i64.i64( , , i64, , - iXLen, iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64: @@ -2037,6 +2127,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 1 ; RV32-NEXT: vasubu.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2044,24 +2135,25 @@ ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: csrwi vxrm, 1 ; RV64-NEXT: vasubu.vx v8, v12, a0, v0.t ; RV64-NEXT: ret entry: - %a = call @llvm.riscv.vasubu.mask.nxv4i64.i64( + %a = call @llvm.riscv.vasubu.rm.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - iXLen %4, iXLen 1) + iXLen 1, iXLen %4, iXLen 1) ret %a } -declare @llvm.riscv.vasubu.nxv8i64.i64( +declare @llvm.riscv.vasubu.rm.nxv8i64.i64( , , i64, - iXLen); + iXLen, iXLen); define @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64: @@ -2072,6 +2164,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: csrwi vxrm, 0 ; RV32-NEXT: vasubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: 
ret
@@ -2079,24 +2172,25 @@
; RV64-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    csrwi vxrm, 0
; RV64-NEXT:    vasubu.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.rm.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)

  ret <vscale x 8 x i64> %a
}

-declare <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vasubu.rm.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
-  iXLen, iXLen);
+  iXLen, iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64:
@@ -2107,6 +2201,7 @@
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    csrwi vxrm, 1
; RV32-NEXT:    vasubu.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
@@ -2114,15 +2209,16 @@
; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    csrwi vxrm, 1
; RV64-NEXT:    vasubu.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.rm.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm.mir b/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
@@ -14,7 +14,7 @@
  ; MIR-NEXT: {{ $}}
  ; MIR-NEXT: dead $x0 = PseudoVSETVLI renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; MIR-NEXT: WriteVXRMImm 0, implicit-def $vxrm
-  ; MIR-NEXT: renamable $v8 = PseudoVAADD_VV_MF8 renamable $v8, renamable $v9, $noreg, 3 /* e8 */, implicit $vxrm, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
+  ; MIR-NEXT: renamable $v8 = PseudoVAADD_VV_MF8 renamable $v8, renamable $v9, 0, $noreg, 3 /* e8 */, implicit $vl, implicit $vtype, implicit $vxrm
  ; MIR-NEXT: PseudoRET implicit $v8
  ; ASM-LABEL: verify_vxrm:
  ; ASM: # %bb.0:
@@ -25,7 +25,6 @@
    %0:vr = COPY $v8
    %1:vr = COPY $v9
    dead $x0 = PseudoVSETVLI killed renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
-    WriteVXRMImm 0, implicit-def $vxrm
-    renamable $v8 = PseudoVAADD_VV_MF8 killed renamable $v8, killed renamable $v9, $noreg, 3 /* e8 */, implicit $vxrm, implicit $vl, implicit $vtype
+    renamable $v8 = PseudoVAADD_VV_MF8 killed renamable $v8, killed renamable $v9, 0, $noreg, 3 /* e8 */
    PseudoRET implicit $v8
...
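
Note: the updated tests above all exercise the same contract: the `.rm` intrinsic variants carry the vxrm rounding mode as an explicit immediate operand placed just before `vl` (unmasked argument order: passthru, vector, scalar, vxrm, vl; masked order: maskedoff, vector, scalar, mask, vxrm, vl, policy), and codegen now writes `csrwi vxrm, <imm>` ahead of the fixed-point instruction instead of relying on whatever the CSR already holds. The sketch below is a hand-written illustration of that contract, not an excerpt from the patch: the function name `@example_vasubu_rnu` is hypothetical, the `<vscale x 1 x i16>` types are inferred from the intrinsic name, `iXLen` is the placeholder these tests substitute for the target's XLEN integer type, and the expected instructions in the comments simply mirror the nxv1i16 CHECK lines above.

; Unmasked vasubu with an explicit rounding mode of 0 (rnu in the vxrm encoding).
declare <vscale x 1 x i16> @llvm.riscv.vasubu.rm.nxv1i16.i16(
  <vscale x 1 x i16>,  ; passthru
  <vscale x 1 x i16>,  ; vector operand (vs2)
  i16,                 ; scalar operand (rs1)
  iXLen,               ; vxrm rounding-mode immediate
  iXLen)               ; vl

define <vscale x 1 x i16> @example_vasubu_rnu(<vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
  ; Expected lowering, mirroring the CHECK lines above:
  ;   vsetvli zero, a1, e16, mf4, ta, ma
  ;   csrwi   vxrm, 0
  ;   vasubu.vx v8, v8, a0
  %r = call <vscale x 1 x i16> @llvm.riscv.vasubu.rm.nxv1i16.i16(
    <vscale x 1 x i16> undef,  ; undef passthru, matching the tail-agnostic tests above
    <vscale x 1 x i16> %vs2,
    i16 %rs1,
    iXLen 0,                   ; vxrm = 0 (rnu)
    iXLen %vl)
  ret <vscale x 1 x i16> %r
}

For the masked form the mask precedes the rounding mode, so the call sites read `(maskedoff, vector, scalar, mask, vxrm, vl, policy)`, which is what the `iXLen 1, iXLen %4, iXLen 1` argument tails in the masked tests above correspond to.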