diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -23,6 +23,14 @@
 // suffixed with the pointer type they are specialised for (denoted `<p>` in the
 // names below), in order to avoid type conflicts.
 
+class RISCVVIntrinsic {
+  // These intrinsics may accept illegal integer values in their llvm_any_ty
+  // operand, so they have to be extended. If set to zero then the intrinsic
+  // does not have any operand that must be extended.
+  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
+  bits<4> ExtendOperand = 0;
+}
+
 let TargetPrefix = "riscv" in {
 
   // T @llvm.<name>.T.<p>(any*, T, T, T imm);
@@ -65,4 +73,30 @@
   // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
   defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
 
+  // RISC-V V extension
+  // For (vector, vector) and (vector, scalar) binary arithmetic.
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RVVBinaryCommon
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_i64_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For (vector, vector) and (vector, scalar) binary arithmetic with mask.
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class RVVBinaryCommonMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     llvm_anyvector_ty, llvm_i64_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
+
+  multiclass riscv_binary {
+    def "int_riscv_" # NAME : RVVBinaryCommon;
+    def "int_riscv_" # NAME # "_mask" : RVVBinaryCommonMask;
+  }
+
+  defm vadd : riscv_binary;
+
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -87,6 +87,8 @@
   explicit RISCVTargetLowering(const TargetMachine &TM,
                                const RISCVSubtarget &STI);
 
+  const RISCVSubtarget &getSubtarget() const { return Subtarget; }
+
   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                           MachineFunction &MF,
                           unsigned Intrinsic) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -331,8 +331,12 @@
   setBooleanContents(ZeroOrOneBooleanContent);
 
-  if (Subtarget.hasStdExtV())
+  if (Subtarget.hasStdExtV()) {
     setBooleanVectorContents(ZeroOrOneBooleanContent);
+    // RVV intrinsics may have illegal operands.
+    for (auto VT : {MVT::i1, MVT::i8, MVT::i16, MVT::i32})
+      setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom);
+  }
 
   // Function alignments.
   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
@@ -1002,6 +1006,27 @@
                                                     SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
   SDLoc DL(Op);
+
+  if (Subtarget.hasStdExtV()) {
+    // Some RVV intrinsics may claim that they want an integer operand to be
+    // extended.
+    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
+      if (II->ExtendedOperand) {
+        assert(II->ExtendedOperand < Op.getNumOperands());
+        std::vector<SDValue> Operands(Op->op_begin(), Op->op_end());
+        SDValue &ScalarOp = Operands[II->ExtendedOperand];
+        if (ScalarOp.getValueType() == MVT::i8 ||
+            ScalarOp.getValueType() == MVT::i16 ||
+            ScalarOp.getValueType() == MVT::i32) {
+          ScalarOp = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, ScalarOp);
+          return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+                             Operands);
+        }
+      }
+    }
+  }
+
   switch (IntNo) {
   default:
     return SDValue(); // Don't custom lower most intrinsics.
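In IR terms, the custom lowering above means a vector-scalar RVV intrinsic may carry a scalar operand narrower than XLEN; on RV64 that operand is wrapped in an ISD::ANY_EXTEND to i64 and the intrinsic node is rebuilt before the switch is reached. A minimal sketch using the vadd.vx intrinsic added by this patch (RV64, SEW=8, LMUL=1/8; the function name is illustrative):

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(<vscale x 1 x i8>, i8, i64)

; The i8 scalar %x is not an XLEN-sized operand on RV64, so
; LowerINTRINSIC_WO_CHAIN any-extends it to i64; selection then produces
; vsetvli followed by vadd.vx with the scalar in a GPR (see the vx tests below).
define <vscale x 1 x i8> @vadd_vx_sketch(<vscale x 1 x i8> %v, i8 %x, i64 %vl) nounwind {
  %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
      <vscale x 1 x i8> %v, i8 %x, i64 %vl)
  ret <vscale x 1 x i8> %r
}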
@@ -2038,6 +2063,15 @@ RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D }; +static const MCPhysReg ArgVRs[] = { + RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20, + RISCV::V21, RISCV::V22, RISCV::V23 +}; +static const MCPhysReg ArgVRM2s[] = { + RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2 +}; +static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4}; +static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8}; // Pass a 2*XLEN argument that has been split into two XLEN values through // registers or the stack as necessary. @@ -2082,7 +2116,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, - bool IsRet, Type *OrigTy) { + bool IsRet, Type *OrigTy, const RISCVTargetLowering *TLI) { unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); assert(XLen == 32 || XLen == 64); MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; @@ -2215,7 +2249,26 @@ Reg = State.AllocateReg(ArgFPR32s); else if (ValVT == MVT::f64 && !UseGPRForF64) Reg = State.AllocateReg(ArgFPR64s); - else + else if (ValVT.isScalableVector()) { + const TargetRegisterClass *RC = TLI->getRegClassFor(ValVT); + if (RC->hasSuperClassEq(&RISCV::VRRegClass)) { + Reg = State.AllocateReg(ArgVRs); + } else if (RC->hasSuperClassEq(&RISCV::VRM2RegClass)) { + Reg = State.AllocateReg(ArgVRM2s); + } else if (RC->hasSuperClassEq(&RISCV::VRM4RegClass)) { + Reg = State.AllocateReg(ArgVRM4s); + } else if (RC->hasSuperClassEq(&RISCV::VRM8RegClass)) { + Reg = State.AllocateReg(ArgVRM8s); + } else { + llvm_unreachable("Unhandled class register for ValueType"); + } + if (!Reg) { + LocInfo = CCValAssign::Indirect; + // Try using a GPR to pass the address + Reg = State.AllocateReg(ArgGPRs); + LocVT = XLenVT; + } + } else Reg = State.AllocateReg(ArgGPRs); unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); @@ -2238,7 +2291,8 @@ return false; } - assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT) && + assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || + (TLI->getSubtarget().hasStdExtV() && ValVT.isScalableVector())) && "Expected an XLenVT at this stage"); if (Reg) { @@ -2274,7 +2328,7 @@ RISCVABI::ABI ABI = MF.getSubtarget().getTargetABI(); if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, - ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) { + ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, this)) { LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << '\n'); llvm_unreachable(nullptr); @@ -2295,7 +2349,7 @@ RISCVABI::ABI ABI = MF.getSubtarget().getTargetABI(); if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, - ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) { + ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, this)) { LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << "\n"); llvm_unreachable(nullptr); @@ -2327,29 +2381,34 @@ // The caller is responsible for loading the full value if the argument is // passed with CCValAssign::Indirect. 
 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
-                                const CCValAssign &VA, const SDLoc &DL) {
+                                const CCValAssign &VA, const SDLoc &DL,
+                                const RISCVTargetLowering *TLI) {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineRegisterInfo &RegInfo = MF.getRegInfo();
   EVT LocVT = VA.getLocVT();
   SDValue Val;
   const TargetRegisterClass *RC;
 
-  switch (LocVT.getSimpleVT().SimpleTy) {
-  default:
-    llvm_unreachable("Unexpected register type");
-  case MVT::i32:
-  case MVT::i64:
-    RC = &RISCV::GPRRegClass;
-    break;
-  case MVT::f16:
-    RC = &RISCV::FPR16RegClass;
-    break;
-  case MVT::f32:
-    RC = &RISCV::FPR32RegClass;
-    break;
-  case MVT::f64:
-    RC = &RISCV::FPR64RegClass;
-    break;
+  if (LocVT.getSimpleVT().isScalableVector()) {
+    RC = TLI->getRegClassFor(LocVT.getSimpleVT());
+  } else {
+    switch (LocVT.getSimpleVT().SimpleTy) {
+    default:
+      llvm_unreachable("Unexpected register type");
+    case MVT::i32:
+    case MVT::i64:
+      RC = &RISCV::GPRRegClass;
+      break;
+    case MVT::f16:
+      RC = &RISCV::FPR16RegClass;
+      break;
+    case MVT::f32:
+      RC = &RISCV::FPR32RegClass;
+      break;
+    case MVT::f64:
+      RC = &RISCV::FPR64RegClass;
+      break;
+    }
   }
 
   Register VReg = RegInfo.createVirtualRegister(RC);
@@ -2623,7 +2682,7 @@
     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
     else if (VA.isRegLoc())
-      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
+      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, this);
     else
       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
 
@@ -3076,7 +3135,8 @@
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
-                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
+                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
+                 this))
       return false;
   }
   return true;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -14,6 +14,26 @@
 ///
 //===----------------------------------------------------------------------===//
 
+// X0 has special meaning for vsetvl/vsetvli.
+//  rd | rs1 |   AVL value | Effect on vl
+//--------------------------------------------
+// !X0 |  X0 |          ~0 | Set vl to VLMAX
+//  X0 |  X0 | Value in vl | Keep existing vl
+def NoX0 : SDNodeXForm<undef,
+[{
+  SDLoc DL(N);
+  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
+    if (C->isNullValue()) {
+      return SDValue(CurDAG->getMachineNode(RISCV::ADDI, DL, MVT::i64,
+                     CurDAG->getRegister(RISCV::X0, MVT::i64),
+                     CurDAG->getTargetConstant(0, DL, MVT::i64)), 0);
+    }
+  }
+
+  return SDValue(N, 0);
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Utilities.
 //===----------------------------------------------------------------------===//
@@ -50,6 +70,26 @@
 // List of EEW.
 defvar EEWList = [8, 16, 32, 64];
 
+class swap_helper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
+  dag Value = !con(
+    Prefix,
+    !if(swap, B, A),
+    !if(swap, A, B),
+    Suffix);
+}
+
+// We only model FPR32 for V instructions in RISCVInstrInfoV.td.
+// FP16/FP32/FP64 registers are aliases of each other. Converting FPR16 and
+// FPR64 to FPR32 for V instructions is enough.
+class ToFPR32 { + dag ret = !cond(!eq(!cast(operand), !cast(FPR64)): + (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32), + !eq(!cast(operand), !cast(FPR16)): + (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16), + !eq(1, 1): + !dag(type, [operand], [name])); +} + //===----------------------------------------------------------------------===// // Vector register and vector group type information. //===----------------------------------------------------------------------===// @@ -122,6 +162,14 @@ let PrimaryKeyName = "getPseudoInfo"; } +def RISCVVIntrinsicsTable : GenericTable { + let FilterClass = "RISCVVIntrinsic"; + let CppTypeName = "RISCVVIntrinsicInfo"; + let Fields = ["IntrinsicID", "ExtendOperand"]; + let PrimaryKey = ["IntrinsicID"]; + let PrimaryKeyName = "getRISCVVIntrinsicInfo"; +} + //===----------------------------------------------------------------------===// // Helpers to define the different pseudo instructions. //===----------------------------------------------------------------------===// @@ -227,10 +275,8 @@ def : Pat<(result_type (vop (op_type op_reg_class:$rs1), (op_type op_reg_class:$rs2))), - (instruction (result_type (IMPLICIT_DEF)), - op_reg_class:$rs1, + (instruction op_reg_class:$rs1, op_reg_class:$rs2, - (mask_type zero_reg), VLMax, sew)>; } @@ -244,6 +290,66 @@ vti.LMul, vti.RegClass, vti.RegClass>; } +multiclass pat_intrinsic_binary +{ + defvar inst = !cast(instruction_name#_#kind#"_"# vlmul.MX); + defvar inst_mask = !cast(instruction_name#_#kind#"_"# vlmul.MX#"_MASK"); + + def : Pat<(result_type (!cast(intrinsic_name) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (i64 GPR:$vl))), + (inst (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (NoX0 GPR:$vl), sew)>; + + def : Pat<(result_type (!cast(intrinsic_name#"_mask") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (i64 GPR:$vl))), + swap_helper< + (inst_mask result_reg_class:$merge), + (inst_mask op1_reg_class:$rs1), + (inst_mask ToFPR32.ret), + (inst_mask (mask_type V0), (NoX0 GPR:$vl), sew), + swap>.Value>; +} + +multiclass pat_intrinsic_binary_int_v_vv_vx_vi +{ + foreach vti = AllIntegerVectors in + { + defm : pat_intrinsic_binary; + defm : pat_intrinsic_binary; + defm : pat_intrinsic_binary; + } +} + //===----------------------------------------------------------------------===// // Pseudo instructions and patterns. //===----------------------------------------------------------------------===// @@ -354,7 +460,13 @@ // Pseudo instructions. defm PseudoVADD : VPseudoBinary_VV_VX_VI; +//===----------------------------------------------------------------------===// +// Patterns. +//===----------------------------------------------------------------------===// + // Whole-register vector patterns. 
defm "" : pat_vop_binary_common; +defm "" : pat_intrinsic_binary_int_v_vv_vx_vi<"int_riscv_vadd", "PseudoVADD">; + } // Predicates = [HasStdExtV] diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h --- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h @@ -413,6 +413,20 @@ } // end namespace RISCVVPseudosTable +namespace RISCVVIntrinsicsTable { + +struct RISCVVIntrinsicInfo { + unsigned int IntrinsicID; + unsigned int ExtendedOperand; +}; + +using namespace RISCV; + +#define GET_RISCVVIntrinsicsTable_DECL +#include "RISCVGenSearchableTables.inc" + +} // end namespace RISCVVIntrinsicsTable + } // namespace llvm #endif diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp --- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp +++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp @@ -14,6 +14,7 @@ #include "RISCVBaseInfo.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Triple.h" +#include "llvm/IR/IntrinsicsRISCV.h" #include "llvm/Support/raw_ostream.h" namespace llvm { @@ -94,6 +95,13 @@ } // namespace RISCVFeatures +namespace RISCVVIntrinsicsTable { + +#define GET_RISCVVIntrinsicsTable_IMPL +#include "RISCVGenSearchableTables.inc" + +} // namespace RISCVVIntrinsicsTable + namespace RISCVVPseudosTable { #define GET_RISCVVPseudosTable_IMPL diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -0,0 +1,2392 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + i64); + +define @test( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: test +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vv v16, v16, v17 + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i8.nxv2i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + undef, + undef, + undef, + 
undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i8.nxv4i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i8.nxv8i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i8.nxv16i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv32i8.nxv32i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv64i8.nxv64i8( + undef, + 
undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i16.nxv1i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i16.nxv2i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i16.nxv4i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i16.nxv8i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vadd.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i16.nxv16i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv32i16.nxv32i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i32.nxv2i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32() nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i32.nxv4i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i32.nxv8i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv16i32.nxv16i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv2i64.nxv2i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + , + , + , + , + 
i64); + +define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv4i64.nxv4i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vadd.nxv8i64.nxv8i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.i8( + , + i8, + i64); + +define 
@intrinsic_vadd_vx_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv16i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv32i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv64i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vadd.vx 
{{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv1i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv2i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv4i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv8i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv16i16.i16( + 
undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv32i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv1i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv2i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv4i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a 
+} + +declare @llvm.riscv.vadd.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv8i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv16i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv1i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv2i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv4i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define 
@intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vadd.nxv8i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define 
@intrinsic_vadd_vi_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv16i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv32i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv64i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv4i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, 
a0, e16,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv16i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv32i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call 
@llvm.riscv.vadd.nxv4i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv16i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv1i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv1i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv2i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv2i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv4i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv4i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} + 
+define @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vadd.nxv8i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vadd.mask.nxv8i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +}
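As a cross-check on the calling-convention changes, scalable-vector arguments are assigned from the ArgVRs/ArgVRM2s/ArgVRM4s/ArgVRM8s tables starting at v16, which is why the first test in this file lowers to vadd.vv v16, v16, v17 with the vl in a0. A minimal sketch at LMUL=2, assuming the ArgVRM2s assignment (v16m2, then v18m2); the function name is illustrative and no CHECK lines are implied:

declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i64)

; With the CC_RISCV change, %a and %b should arrive in v16m2 and v18m2 and
; %vl in a0, so this is expected to select to roughly:
;   vsetvli {{.*}}, a0, e8,m2
;   vadd.vv v16, v16, v18
define <vscale x 16 x i8> @vadd_vv_m2_args(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i64 %vl) nounwind {
entry:
  %r = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
      <vscale x 16 x i8> %a,
      <vscale x 16 x i8> %b,
      i64 %vl)
  ret <vscale x 16 x i8> %r
}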