diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -66,3 +66,43 @@ defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics; } // TargetPrefix = "riscv" + +//===----------------------------------------------------------------------===// +// Vectors + +class RISCVVIntrinsic { + // These intrinsics may accept illegal integer values in their llvm_any_ty + // operand, so they have to be extended. If set to zero then the intrinsic + // does not have any operand that must be extended. + Intrinsic IntrinsicID = !cast(NAME); + bits<4> ExtendOperand = 0; +} + +let TargetPrefix = "riscv" in { + // For destination vector type is the same as first source vector. + // Input: (vector_in, vector_in/scalar_in, vl) + class RISCVBinaryAAXNoMask + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic { + let ExtendOperand = 2; + } + // For destination vector type is the same as first source vector (with mask). + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl) + class RISCVBinaryAAXMask + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyvector_ty, llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic { + let ExtendOperand = 3; + } + + multiclass RISCVBinaryAAX { + def "int_riscv_" # NAME : RISCVBinaryAAXNoMask; + def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask; + } + + defm vadd : RISCVBinaryAAX; + defm vsub : RISCVBinaryAAX; + defm vrsub : RISCVBinaryAAX; +} // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -87,6 +87,8 @@ explicit RISCVTargetLowering(const TargetMachine &TM, const RISCVSubtarget &STI); + const RISCVSubtarget &getSubtarget() const { return Subtarget; } + bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override; @@ -269,6 +271,20 @@ const SmallVectorImpl> &Regs, MachineFunction &MF) const; }; + +namespace RISCVVIntrinsicsTable { + +struct RISCVVIntrinsicInfo { + unsigned int IntrinsicID; + unsigned int ExtendedOperand; +}; + +using namespace RISCV; + +#define GET_RISCVVIntrinsicsTable_DECL +#include "RISCVGenSearchableTables.inc" + +} // end namespace RISCVVIntrinsicsTable } #endif diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -331,8 +331,12 @@ setBooleanContents(ZeroOrOneBooleanContent); - if (Subtarget.hasStdExtV()) + if (Subtarget.hasStdExtV()) { setBooleanVectorContents(ZeroOrOneBooleanContent); + // RVV intrinsics may have illegal operands. + for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) + setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom); + } // Function alignments. const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4); @@ -1002,6 +1006,28 @@ SelectionDAG &DAG) const { unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); SDLoc DL(Op); + + if (Subtarget.hasStdExtV()) { + // Some RVV intrinsics may claim that they want an integer operand to be + // extended. 
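// The lookup used just below is generated from the RISCVVIntrinsicsTable
// GenericTable added in RISCVInstrInfoVPseudos.td and lands in
// RISCVGenSearchableTables.inc, which is not part of this diff. A rough,
// hand-written equivalent of what that table provides (a sketch only: the
// generated table is sorted and keyed by TableGen, and the entries below are
// placeholders, not the real records):
#include <algorithm>
#include <iterator>

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned int IntrinsicID;     // e.g. Intrinsic::riscv_vadd
  unsigned int ExtendedOperand; // operand index to any-extend, 0 if none
};

// Kept sorted by IntrinsicID so the lookup below can binary-search.
static const RISCVVIntrinsicInfo Table[] = {
    {1001, 2}, // placeholder for {Intrinsic::riscv_vadd, 2}
    {1002, 3}, // placeholder for {Intrinsic::riscv_vadd_mask, 3}
};

const RISCVVIntrinsicInfo *getRISCVVIntrinsicInfo(unsigned IntrinsicID) {
  const RISCVVIntrinsicInfo *I = std::lower_bound(
      std::begin(Table), std::end(Table), IntrinsicID,
      [](const RISCVVIntrinsicInfo &Info, unsigned ID) {
        return Info.IntrinsicID < ID;
      });
  if (I == std::end(Table) || I->IntrinsicID != IntrinsicID)
    return nullptr;
  return I;
}

} // namespace RISCVVIntrinsicsTable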
+ if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = + RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) { + if (II->ExtendedOperand) { + assert(II->ExtendedOperand < Op.getNumOperands()); + SmallVector Operands(Op->op_begin(), Op->op_end()); + SDValue &ScalarOp = Operands[II->ExtendedOperand]; + if (ScalarOp.getValueType() == MVT::i8 || + ScalarOp.getValueType() == MVT::i16 || + ScalarOp.getValueType() == MVT::i32) { + ScalarOp = + DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp); + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), + Operands); + } + } + } + } + switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. @@ -2038,6 +2064,16 @@ RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D }; +// This is an interim calling convention and it may be changed in the future. +static const MCPhysReg ArgVRs[] = { + RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20, + RISCV::V21, RISCV::V22, RISCV::V23 +}; +static const MCPhysReg ArgVRM2s[] = { + RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2 +}; +static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4}; +static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8}; // Pass a 2*XLEN argument that has been split into two XLEN values through // registers or the stack as necessary. @@ -2082,7 +2118,8 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, - bool IsRet, Type *OrigTy) { + bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, + Optional FirstMaskArgument) { unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); assert(XLen == 32 || XLen == 64); MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; @@ -2215,7 +2252,34 @@ Reg = State.AllocateReg(ArgFPR32s); else if (ValVT == MVT::f64 && !UseGPRForF64) Reg = State.AllocateReg(ArgFPR64s); - else + else if (ValVT.isScalableVector()) { + const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); + if (RC == &RISCV::VRRegClass) { + // Assign the first mask argument to V0. + // This is an interim calling convention and it may be changed in the + // future. + if (FirstMaskArgument.hasValue() && + ValNo == FirstMaskArgument.getValue()) { + Reg = State.AllocateReg(RISCV::V0); + } else { + Reg = State.AllocateReg(ArgVRs); + } + } else if (RC == &RISCV::VRM2RegClass) { + Reg = State.AllocateReg(ArgVRM2s); + } else if (RC == &RISCV::VRM4RegClass) { + Reg = State.AllocateReg(ArgVRM4s); + } else if (RC == &RISCV::VRM8RegClass) { + Reg = State.AllocateReg(ArgVRM8s); + } else { + llvm_unreachable("Unhandled class register for ValueType"); + } + if (!Reg) { + LocInfo = CCValAssign::Indirect; + // Try using a GPR to pass the address + Reg = State.AllocateReg(ArgGPRs); + LocVT = XLenVT; + } + } else Reg = State.AllocateReg(ArgGPRs); unsigned StackOffset = Reg ? 
0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); @@ -2238,8 +2302,9 @@ return false; } - assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT) && - "Expected an XLenVT at this stage"); + assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || + (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) && + "Expected an XLenVT or scalable vector types at this stage"); if (Reg) { State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); @@ -2256,12 +2321,32 @@ return false; } +template +static void PreAssignMask(const ArgTy &Args, + Optional &FirstMaskArgument, + CCState &CCInfo) { + unsigned NumArgs = Args.size(); + for (unsigned i = 0; i != NumArgs; ++i) { + MVT ArgVT = Args[i].VT; + if (!ArgVT.isScalableVector() || + ArgVT.getVectorElementType().SimpleTy != MVT::i1) + continue; + + FirstMaskArgument = i; + break; + } +} + void RISCVTargetLowering::analyzeInputArgs( MachineFunction &MF, CCState &CCInfo, const SmallVectorImpl &Ins, bool IsRet) const { unsigned NumArgs = Ins.size(); FunctionType *FType = MF.getFunction().getFunctionType(); + Optional FirstMaskArgument; + if (Subtarget.hasStdExtV()) + PreAssignMask(Ins, FirstMaskArgument, CCInfo); + for (unsigned i = 0; i != NumArgs; ++i) { MVT ArgVT = Ins[i].VT; ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; @@ -2274,7 +2359,8 @@ RISCVABI::ABI ABI = MF.getSubtarget().getTargetABI(); if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, - ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) { + ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, + FirstMaskArgument)) { LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << '\n'); llvm_unreachable(nullptr); @@ -2288,6 +2374,10 @@ CallLoweringInfo *CLI) const { unsigned NumArgs = Outs.size(); + Optional FirstMaskArgument; + if (Subtarget.hasStdExtV()) + PreAssignMask(Outs, FirstMaskArgument, CCInfo); + for (unsigned i = 0; i != NumArgs; i++) { MVT ArgVT = Outs[i].VT; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; @@ -2295,7 +2385,8 @@ RISCVABI::ABI ABI = MF.getSubtarget().getTargetABI(); if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, - ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) { + ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, + FirstMaskArgument)) { LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << "\n"); llvm_unreachable(nullptr); @@ -2327,31 +2418,13 @@ // The caller is responsible for loading the full value if the argument is // passed with CCValAssign::Indirect. 
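// The vector argument handling added to CC_RISCV above boils down to: the
// first mask (i1-element) argument is pinned to v0, every other vector
// argument draws from the v16-based pool that matches its register class, and
// anything that does not fit is passed indirectly, i.e. its address goes in a
// GPR and LocInfo becomes CCValAssign::Indirect. A stand-alone model of that
// decision (a sketch under simplifying assumptions: it ignores the physical
// aliasing between the LMUL pools, which CCState tracks in the real code):
#include <array>
#include <cstddef>
#include <optional>
#include <string>
#include <vector>

namespace {

// Register pools copied from ArgVRs / ArgVRM2s / ArgVRM4s / ArgVRM8s above.
const std::vector<std::string> PoolByLMUL[4] = {
    {"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"},
    {"v16m2", "v18m2", "v20m2", "v22m2"},
    {"v16m4", "v20m4"},
    {"v16m8"}};

struct VectorArg {
  bool IsMask;   // scalable vector with i1 elements
  unsigned LMUL; // 1, 2, 4 or 8, i.e. VR, VRM2, VRM4 or VRM8
};

// Returns the assigned register, or std::nullopt for the indirect case.
std::optional<std::string> assignVectorArg(const VectorArg &A, bool &V0Taken,
                                           std::array<std::size_t, 4> &Next) {
  if (A.IsMask && !V0Taken) { // only the first mask argument gets v0
    V0Taken = true;
    return std::string("v0");
  }
  unsigned Idx = A.LMUL == 8 ? 3 : A.LMUL == 4 ? 2 : A.LMUL == 2 ? 1 : 0;
  if (Next[Idx] == PoolByLMUL[Idx].size())
    return std::nullopt;
  return PoolByLMUL[Idx][Next[Idx]++];
}

} // namespace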
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, - const CCValAssign &VA, const SDLoc &DL) { + const CCValAssign &VA, const SDLoc &DL, + const RISCVTargetLowering &TLI) { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); EVT LocVT = VA.getLocVT(); SDValue Val; - const TargetRegisterClass *RC; - - switch (LocVT.getSimpleVT().SimpleTy) { - default: - llvm_unreachable("Unexpected register type"); - case MVT::i32: - case MVT::i64: - RC = &RISCV::GPRRegClass; - break; - case MVT::f16: - RC = &RISCV::FPR16RegClass; - break; - case MVT::f32: - RC = &RISCV::FPR32RegClass; - break; - case MVT::f64: - RC = &RISCV::FPR64RegClass; - break; - } - + const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); Register VReg = RegInfo.createVirtualRegister(RC); RegInfo.addLiveIn(VA.getLocReg(), VReg); Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); @@ -2623,7 +2696,7 @@ if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL); else if (VA.isRegLoc()) - ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL); + ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this); else ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL); @@ -3071,12 +3144,18 @@ const SmallVectorImpl &Outs, LLVMContext &Context) const { SmallVector RVLocs; CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); + + Optional FirstMaskArgument; + if (Subtarget.hasStdExtV()) + PreAssignMask(Outs, FirstMaskArgument, CCInfo); + for (unsigned i = 0, e = Outs.size(); i != e; ++i) { MVT VT = Outs[i].VT; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; RISCVABI::ABI ABI = MF.getSubtarget().getTargetABI(); if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, - ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr)) + ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, + *this, FirstMaskArgument)) return false; } return true; @@ -3673,3 +3752,12 @@ StringRef(RegName) + "\".")); return Reg; } + +namespace llvm { +namespace RISCVVIntrinsicsTable { + +#define GET_RISCVVIntrinsicsTable_IMPL +#include "RISCVGenSearchableTables.inc" + +} // namespace RISCVVIntrinsicsTable +} // namespace llvm diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -100,18 +100,35 @@ // FPR->FPR copies unsigned Opc; + bool IsScalableVector = false; if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) Opc = RISCV::FSGNJ_H; else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) Opc = RISCV::FSGNJ_S; else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) Opc = RISCV::FSGNJ_D; - else + else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) { + Opc = RISCV::PseudoVMV1R_V; + IsScalableVector = true; + } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) { + Opc = RISCV::PseudoVMV2R_V; + IsScalableVector = true; + } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) { + Opc = RISCV::PseudoVMV4R_V; + IsScalableVector = true; + } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) { + Opc = RISCV::PseudoVMV8R_V; + IsScalableVector = true; + } else llvm_unreachable("Impossible reg-to-reg copy"); - BuildMI(MBB, MBBI, DL, get(Opc), DstReg) - .addReg(SrcReg, getKillRegState(KillSrc)) - .addReg(SrcReg, getKillRegState(KillSrc)); + if (IsScalableVector) + BuildMI(MBB, MBBI, DL, get(Opc), DstReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + else + BuildMI(MBB, MBBI, DL, get(Opc), DstReg) + 
.addReg(SrcReg, getKillRegState(KillSrc)) + .addReg(SrcReg, getKillRegState(KillSrc)); } void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -14,6 +14,23 @@ /// //===----------------------------------------------------------------------===// +// X0 has special meaning for vsetvl/vsetvli. +// rd | rs1 | AVL value | Effect on vl +//-------------------------------------------------------------- +// !X0 | X0 | VLMAX | Set vl to VLMAX +// X0 | X0 | Value in vl | Keep current vl, just change vtype. +def NoX0 : SDNodeXForm(N); + if (C && C->isNullValue()) { + SDLoc DL(N); + return SDValue(CurDAG->getMachineNode(RISCV::ADDI, DL, Subtarget->getXLenVT(), + CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT()), + CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT())), 0); + } + return SDValue(N, 0); +}]>; + //===----------------------------------------------------------------------===// // Utilities. //===----------------------------------------------------------------------===// @@ -50,6 +67,18 @@ // List of EEW. defvar EEWList = [8, 16, 32, 64]; +// We only model FPR32 for V instructions in RISCVInstrInfoV.td. +// FP16/FP32/FP64 registers are alias each other. Convert FPR16 and FPR64 +// to FPR32 for V instructions is enough. +class ToFPR32 { + dag ret = !cond(!eq(!cast(operand), !cast(FPR64)): + (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32), + !eq(!cast(operand), !cast(FPR16)): + (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16), + !eq(1, 1): + !dag(type, [operand], [name])); +} + //===----------------------------------------------------------------------===// // Vector register and vector group type information. //===----------------------------------------------------------------------===// @@ -103,13 +132,20 @@ // This class holds the record of the RISCVVPseudoTable below. // This represents the information we need in codegen for each pseudo. +// The definition should be consistent with `struct PseudoInfo` in +// RISCVBaseInfo.h. +class CONST8b val> { + bits<8> V = val; +} +def INVALID_INDEX : CONST8b<0x80>; class RISCVVPseudo { Pseudo Pseudo = !cast(NAME); // Used as a key. Instruction BaseInstr; - bits<8> VLIndex; - bits<8> SEWIndex; - bits<8> MergeOpIndex; + bits<8> VLIndex = INVALID_INDEX.V; + bits<8> SEWIndex = INVALID_INDEX.V; + bits<8> MergeOpIndex = INVALID_INDEX.V; bits<3> VLMul; + bit HasDummyMask = 0; } // The actual table. @@ -117,11 +153,19 @@ let FilterClass = "RISCVVPseudo"; let CppTypeName = "PseudoInfo"; let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex", - "VLMul" ]; + "VLMul", "HasDummyMask" ]; let PrimaryKey = [ "Pseudo" ]; let PrimaryKeyName = "getPseudoInfo"; } +def RISCVVIntrinsicsTable : GenericTable { + let FilterClass = "RISCVVIntrinsic"; + let CppTypeName = "RISCVVIntrinsicInfo"; + let Fields = ["IntrinsicID", "ExtendOperand"]; + let PrimaryKey = ["IntrinsicID"]; + let PrimaryKeyName = "getRISCVVIntrinsicInfo"; +} + //===----------------------------------------------------------------------===// // Helpers to define the different pseudo instructions. 
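// The rd/rs1 table at the top of this file is why the NoX0 transform exists:
// an AVL that is the constant 0 must not end up encoded with rs1 = x0, since
// that encoding means "set vl to VLMAX" or "keep the current vl", never
// "set vl to 0", so the zero is first materialized into a scratch GPR with
// ADDI. An executable restatement of the table (a sketch; where AVL exceeds
// VLMAX the spec leaves the implementation some freedom, simplified here to
// clamping):
#include <cstdint>

struct VSetVLI {
  bool RdIsX0;
  bool Rs1IsX0;
  uint64_t AVL; // value read from rs1 when rs1 != x0
};

uint64_t resultingVL(const VSetVLI &I, uint64_t CurrentVL, uint64_t VLMAX) {
  if (!I.Rs1IsX0)
    return I.AVL <= VLMAX ? I.AVL : VLMAX; // ordinary AVL request (simplified)
  if (!I.RdIsX0)
    return VLMAX;                          // rd != x0, rs1 == x0: vl = VLMAX
  return CurrentVL;                        // rd == x0, rs1 == x0: keep vl
}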
//===----------------------------------------------------------------------===// @@ -138,27 +182,53 @@ !subst("Pseudo", "", PseudoInst))))))))); } -class VPseudoBinary : +// The destination vector register group for a masked vector instruction cannot +// overlap the source mask register (v0), unless the destination vector register +// is being written with a mask value (e.g., comparisons) or the scalar result +// of a reduction. +class GetVRegNoV0 { + VReg R = !cond(!eq(VRegClass, VR) : VRNoV0, + !eq(VRegClass, VRM2) : VRM2NoV0, + !eq(VRegClass, VRM4) : VRM4NoV0, + !eq(VRegClass, VRM8) : VRM8NoV0, + !eq(1, 1) : VRegClass); +} + +class VPseudo : + Pseudo, RISCVVPseudo { + let BaseInstr = instr; + let VLMul = m.value; +} + +class VPseudoBinaryNoMask : Pseudo<(outs RetClass:$rd), (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>, RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; let Uses = [VL, VTYPE]; let VLIndex = 3; let SEWIndex = 4; - let MergeOpIndex = -1; + let HasDummyMask = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } class VPseudoBinaryMask : - Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>, RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; let Constraints = "$rd = $merge"; let Uses = [VL, VTYPE]; let VLIndex = 5; @@ -171,75 +241,181 @@ VReg Op1Class, DAGOperand Op2Class, LMULInfo MInfo> { - def "_" # MInfo.MX : VPseudoBinary; - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask; + let VLMul = MInfo.value in { + def "_" # MInfo.MX : VPseudoBinaryNoMask; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask; + } } multiclass VPseudoBinaryV_VV { - let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in foreach m = MxList.m in - { - let VLMul = m.value in defm _VV : VPseudoBinary; - } } multiclass VPseudoBinaryV_VX { - let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in foreach m = MxList.m in - { - let VLMul = m.value in defm _VX : VPseudoBinary; - } } multiclass VPseudoBinaryV_VI { - let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in foreach m = MxList.m in - { - let VLMul = m.value in defm _VI : VPseudoBinary; - } } -multiclass VPseudoBinary_VV_VX_VI { +multiclass VPseudoBinaryV_VV_VX_VI { defm "" : VPseudoBinaryV_VV; defm "" : VPseudoBinaryV_VX; defm "" : VPseudoBinaryV_VI; } +multiclass VPseudoBinaryV_VV_VX { + defm "" : VPseudoBinaryV_VV; + defm "" : VPseudoBinaryV_VX; +} + +multiclass VPseudoBinaryV_VX_VI { + defm "" : VPseudoBinaryV_VX; + defm "" : VPseudoBinaryV_VI; +} + //===----------------------------------------------------------------------===// // Helpers to define the different patterns. 
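// How the index fields recorded in RISCVVPseudo line up with the operand
// lists defined above (operand 0 is the result): VPseudoBinaryNoMask is
// (rd), (rs2, rs1, vl, sew), so VLIndex = 3 and SEWIndex = 4;
// VPseudoBinaryMask is (rd), (merge, rs2, rs1, vm, vl, sew), so VLIndex = 5
// and, by the same counting, the SEW operand lands at index 6. A C++ consumer
// could look like the sketch below; it assumes getPseudoInfo() is the lookup
// generated from the RISCVVPseudoTable above and that PseudoInfo comes from
// Utils/RISCVBaseInfo.h as in this patch.
#include "Utils/RISCVBaseInfo.h"       // in-tree path used by this patch
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Returns the VL operand of an RVV pseudo, or nullptr for non-RVV opcodes and
// for pseudos whose VLIndex holds the INVALID_INDEX sentinel (reads back < 0).
static const MachineOperand *getVLOperand(const MachineInstr &MI) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(MI.getOpcode());
  if (!RVV || RVV->getVLIndex() < 0)
    return nullptr;
  return &MI.getOperand(RVV->getVLIndex());
}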
//===----------------------------------------------------------------------===// - -multiclass pat_vop_binary +class VPatBinarySDNode : + Pat<(result_type (vop + (op_type op_reg_class:$rs1), + (op_type op_reg_class:$rs2))), + (!cast(instruction_name#"_VV_"# vlmul.MX) + op_reg_class:$rs1, + op_reg_class:$rs2, + VLMax, sew)>; + +multiclass VPatBinarySDNode { - defvar instruction = !cast(instruction_name#"_VV_"# vlmul.MX); - def : Pat<(result_type (vop - (op_type op_reg_class:$rs1), - (op_type op_reg_class:$rs2))), - (instruction op_reg_class:$rs1, - op_reg_class:$rs2, - VLMax, sew)>; -} - -multiclass pat_vop_binary_common vtilist> + foreach vti = AllIntegerVectors in + def : VPatBinarySDNode; +} + +class VPatBinaryNoMask : + Pat<(result_type (!cast(intrinsic_name) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT GPR:$vl))), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (op1_type op1_reg_class:$rs1), + ToFPR32.ret, + (NoX0 GPR:$vl), sew)>; + +class VPatBinaryMask : + Pat<(result_type (!cast(intrinsic_name#"_mask") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT GPR:$vl))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + ToFPR32.ret, + (mask_type V0), (NoX0 GPR:$vl), sew)>; + +multiclass VPatBinary { + def : VPatBinaryNoMask; + def : VPatBinaryMask; +} + +multiclass VPatBinaryV_VV vtilist> { + foreach vti = vtilist in + defm : VPatBinary; +} + +multiclass VPatBinaryV_VX vtilist> { + foreach vti = vtilist in + defm : VPatBinary; +} + +multiclass VPatBinaryV_VI vtilist, Operand imm_type> { foreach vti = vtilist in - defm : pat_vop_binary; + defm : VPatBinary; +} + +multiclass VPatBinaryV_VV_VX_VI vtilist> +{ + defm "" : VPatBinaryV_VV; + defm "" : VPatBinaryV_VX; + defm "" : VPatBinaryV_VI; +} + +multiclass VPatBinaryV_VV_VX vtilist> +{ + defm "" : VPatBinaryV_VV; + defm "" : VPatBinaryV_VX; +} + +multiclass VPatBinaryV_VX_VI vtilist> +{ + defm "" : VPatBinaryV_VX; + defm "" : VPatBinaryV_VI; } //===----------------------------------------------------------------------===// @@ -249,6 +425,16 @@ let Predicates = [HasStdExtV] in { //===----------------------------------------------------------------------===// +// Pseudo Instructions for CodeGen +//===----------------------------------------------------------------------===// +let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { + def PseudoVMV1R_V : VPseudo; + def PseudoVMV2R_V : VPseudo; + def PseudoVMV4R_V : VPseudo; + def PseudoVMV8R_V : VPseudo; +} + +//===----------------------------------------------------------------------===// // 6. Configuration-Setting Instructions //===----------------------------------------------------------------------===// @@ -342,17 +528,36 @@ } //===----------------------------------------------------------------------===// +// Pseudo Instructions +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// // 12. Vector Integer Arithmetic Instructions //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // 12.1. 
Vector Single-Width Integer Add and Subtract //===----------------------------------------------------------------------===// +defm PseudoVADD : VPseudoBinaryV_VV_VX_VI; +defm PseudoVSUB : VPseudoBinaryV_VV_VX; +defm PseudoVRSUB : VPseudoBinaryV_VX_VI; -// Pseudo instructions. -defm PseudoVADD : VPseudoBinary_VV_VX_VI; +//===----------------------------------------------------------------------===// +// Patterns. +//===----------------------------------------------------------------------===// // Whole-register vector patterns. -defm "" : pat_vop_binary_common; +defm "" : VPatBinarySDNode; + +//===----------------------------------------------------------------------===// +// 12. Vector Integer Arithmetic Instructions +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// 12.1. Vector Single-Width Integer Add and Subtract +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>; +defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>; } // Predicates = [HasStdExtV] diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp --- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp +++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp @@ -177,9 +177,9 @@ OutMI.addOperand(MCOp); } - // Unmasked pseudo instructions define MergeOpIndex to -1. - // Append dummy mask operand to V instructions. - if (RVV->getMergeOpIndex() == -1) + // Unmasked pseudo instructions need to append dummy mask operand to + // V instructions. All V instructions are modeled as the masked version. + if (RVV->hasDummyMask()) OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister)); return true; diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -413,14 +413,27 @@ (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2, V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2), 2>; +def VRM2NoV0 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t, + vfloat32m2_t, vfloat64m2_t], + (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2, + V18M2, V20M2, V22M2, V24M2, V2M2, V4M2, V6M2), 2>; + def VRM4 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t, vfloat32m4_t, vfloat64m4_t], (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4), 4>; +def VRM4NoV0 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t, + vfloat32m4_t, vfloat64m4_t], + (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V4M4), 4>; + def VRM8 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t, vfloat32m8_t, vfloat64m8_t], (add V8M8, V16M8, V24M8, V0M8), 8>; +def VRM8NoV0 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t, + vfloat32m8_t, vfloat64m8_t], + (add V8M8, V16M8, V24M8), 8>; + defvar VMaskVTs = [vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t, vbool2_t, vbool1_t]; diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h --- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h @@ -391,6 +391,10 @@ namespace RISCVVPseudosTable { +// The definition should be consistent with `class RISCVVPseudo` in +// RISCVInstrInfoVPseudos.td. 
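// Two details are worth spelling out here. First, INVALID_INDEX is 0x80
// because the index fields are stored as uint8_t and read back through a
// signed 8-bit cast, so "no such operand" turns into a negative value that
// callers can test with a plain "< 0" (two's-complement readback; a sketch of
// the check is below). Second, HasDummyMask is what tells the MC lowering in
// RISCVMCInstLower.cpp above to append RISCV::NoRegister as the trailing mask
// operand of unmasked pseudos, so the final MCInst matches the operand list of
// the always-masked real V instruction.
#include <cstdint>

constexpr int decodeOperandIndex(uint8_t Stored) {
  return static_cast<int8_t>(Stored); // 0x80 -> -128; real indices stay >= 0
}
static_assert(decodeOperandIndex(0x80) < 0, "sentinel reads back negative");
static_assert(decodeOperandIndex(5) == 5, "small indices are unchanged");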
+static const uint8_t INVALID_INDEX = 0x80; + struct PseudoInfo { unsigned int Pseudo; unsigned int BaseInstr; @@ -398,12 +402,15 @@ uint8_t SEWIndex; uint8_t MergeOpIndex; uint8_t VLMul; + bool HasDummyMask; int getVLIndex() const { return static_cast(VLIndex); } int getSEWIndex() const { return static_cast(SEWIndex); } int getMergeOpIndex() const { return static_cast(MergeOpIndex); } + + bool hasDummyMask() const { return HasDummyMask; } }; using namespace RISCV; diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -0,0 +1,2377 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + , + , + , + , 
+ i64); + +define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadd.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + 
ret %a +} + +declare @llvm.riscv.vadd.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define 
@intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret 
%a +} + +declare @llvm.riscv.vadd.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vadd_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 
9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv4i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv16i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv32i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv64i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv4i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv16i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv32i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define 
@intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv4i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv16i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll
@@ -0,0 +1,1497 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8>
@llvm.riscv.vrsub.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a 
+} + +declare @llvm.riscv.vrsub.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 
%2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: 
vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrsub.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vrsub.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv16i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv32i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv64i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret 
%a +} + +define @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv16i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv32i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv16i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i64.i64( + %0, + i64 9, + i64 
%1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64
%4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vsub.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, 
i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i8.i8( + , + , + i8, 
+ , + i64); + +define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + 
%a = call @llvm.riscv.vsub.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16( 
%0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsub.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i64.i64( + , + , + i64, + 
<vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}