diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -23,6 +23,14 @@
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.
+class RISCVVIntrinsic {
+ // These intrinsics may accept illegal integer values in their llvm_any_ty
+ // operand, so they have to be extended. If set to zero then the intrinsic
+ // does not have any operand that must be extended.
+  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
+ bits<4> ExtendOperand = 0;
+}
+
let TargetPrefix = "riscv" in {
  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
@@ -65,4 +73,32 @@
  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
+ // RISC-V V extension
+ // For (vector, vector) and (vector, scalar) binary arithmetic.
+ // Input: (vector_in, vector_in/scalar_in, vl)
+ class RVVBinaryCommon
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_any_ty, llvm_i64_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 2;
+ }
+  // For (vector, vector) and (vector, scalar) binary arithmetic with mask.
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+ class RVVBinaryCommonMask
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyvector_ty, llvm_i64_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 3;
+ }
+
+ multiclass riscv_binary {
+ def "int_riscv_" # NAME : RVVBinaryCommon;
+ def "int_riscv_" # NAME # "_mask" : RVVBinaryCommonMask;
+ }
+
+ defm vadd : riscv_binary;
+ defm vsub : riscv_binary;
+ defm vrsub : riscv_binary;
+
} // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -87,6 +87,8 @@
explicit RISCVTargetLowering(const TargetMachine &TM,
const RISCVSubtarget &STI);
+ const RISCVSubtarget &getSubtarget() const { return Subtarget; }
+
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -331,8 +331,12 @@
setBooleanContents(ZeroOrOneBooleanContent);
- if (Subtarget.hasStdExtV())
+ if (Subtarget.hasStdExtV()) {
setBooleanVectorContents(ZeroOrOneBooleanContent);
+ // RVV intrinsics may have illegal operands.
+ for (auto VT : {MVT::i1, MVT::i8, MVT::i16, MVT::i32})
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom);
+ }
// Function alignments.
const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
@@ -1002,6 +1006,27 @@
SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc DL(Op);
+
+ if (Subtarget.hasStdExtV()) {
+ // Some RVV intrinsics may claim that they want an integer operand to be
+ // extended.
+ if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+ RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
+ if (II->ExtendedOperand) {
+ assert(II->ExtendedOperand < Op.getNumOperands());
+        std::vector<SDValue> Operands(Op->op_begin(), Op->op_end());
+ SDValue &ScalarOp = Operands[II->ExtendedOperand];
+ if (ScalarOp.getValueType() == MVT::i8 ||
+ ScalarOp.getValueType() == MVT::i16 ||
+ ScalarOp.getValueType() == MVT::i32) {
+ ScalarOp = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, ScalarOp);
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+ Operands);
+ }
+ }
+ }
+ }
+
switch (IntNo) {
default:
return SDValue(); // Don't custom lower most intrinsics.
@@ -2038,6 +2063,16 @@
RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};
+// This is an interim calling convention and it may be changed in the future.
+static const MCPhysReg ArgVRs[] = {
+ RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
+ RISCV::V21, RISCV::V22, RISCV::V23
+};
+static const MCPhysReg ArgVRM2s[] = {
+ RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
+};
+static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
+static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
@@ -2082,7 +2117,7 @@
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
- bool IsRet, Type *OrigTy) {
+ bool IsRet, Type *OrigTy, const RISCVTargetLowering *TLI) {
unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
assert(XLen == 32 || XLen == 64);
MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
@@ -2215,7 +2250,26 @@
Reg = State.AllocateReg(ArgFPR32s);
else if (ValVT == MVT::f64 && !UseGPRForF64)
Reg = State.AllocateReg(ArgFPR64s);
- else
+ else if (ValVT.isScalableVector()) {
+ const TargetRegisterClass *RC = TLI->getRegClassFor(ValVT);
+ if (RC->hasSuperClassEq(&RISCV::VRRegClass)) {
+ Reg = State.AllocateReg(ArgVRs);
+ } else if (RC->hasSuperClassEq(&RISCV::VRM2RegClass)) {
+ Reg = State.AllocateReg(ArgVRM2s);
+ } else if (RC->hasSuperClassEq(&RISCV::VRM4RegClass)) {
+ Reg = State.AllocateReg(ArgVRM4s);
+ } else if (RC->hasSuperClassEq(&RISCV::VRM8RegClass)) {
+ Reg = State.AllocateReg(ArgVRM8s);
+ } else {
+ llvm_unreachable("Unhandled class register for ValueType");
+ }
+ if (!Reg) {
+ LocInfo = CCValAssign::Indirect;
+ // Try using a GPR to pass the address
+ Reg = State.AllocateReg(ArgGPRs);
+ LocVT = XLenVT;
+ }
+ } else
Reg = State.AllocateReg(ArgGPRs);
unsigned StackOffset =
Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
@@ -2238,8 +2292,9 @@
return false;
}
- assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT) &&
- "Expected an XLenVT at this stage");
+ assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
+ (TLI->getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
+ "Expected an XLenVT or scalable vector types at this stage");
if (Reg) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -2274,7 +2329,7 @@
    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, this)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n');
llvm_unreachable(nullptr);
@@ -2295,7 +2350,7 @@
    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
- ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+ ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, this)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << "\n");
llvm_unreachable(nullptr);
@@ -2327,29 +2382,34 @@
// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
- const CCValAssign &VA, const SDLoc &DL) {
+ const CCValAssign &VA, const SDLoc &DL,
+ const RISCVTargetLowering *TLI) {
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
EVT LocVT = VA.getLocVT();
SDValue Val;
const TargetRegisterClass *RC;
- switch (LocVT.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("Unexpected register type");
- case MVT::i32:
- case MVT::i64:
- RC = &RISCV::GPRRegClass;
- break;
- case MVT::f16:
- RC = &RISCV::FPR16RegClass;
- break;
- case MVT::f32:
- RC = &RISCV::FPR32RegClass;
- break;
- case MVT::f64:
- RC = &RISCV::FPR64RegClass;
- break;
+ if (LocVT.getSimpleVT().isScalableVector()) {
+ RC = TLI->getRegClassFor(LocVT.getSimpleVT());
+ } else {
+ switch (LocVT.getSimpleVT().SimpleTy) {
+ default:
+ llvm_unreachable("Unexpected register type");
+ case MVT::i32:
+ case MVT::i64:
+ RC = &RISCV::GPRRegClass;
+ break;
+ case MVT::f16:
+ RC = &RISCV::FPR16RegClass;
+ break;
+ case MVT::f32:
+ RC = &RISCV::FPR32RegClass;
+ break;
+ case MVT::f64:
+ RC = &RISCV::FPR64RegClass;
+ break;
+ }
}
Register VReg = RegInfo.createVirtualRegister(RC);
@@ -2623,7 +2683,7 @@
if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
else if (VA.isRegLoc())
- ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
+ ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, this);
else
ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
@@ -3076,7 +3136,8 @@
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
+ ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
+ this))
return false;
}
return true;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -14,6 +14,26 @@
///
//===----------------------------------------------------------------------===//
+// X0 has special meaning for vsetvl/vsetvli.
+// rd | rs1 | AVL value | Effect on vl
+//--------------------------------------------
+// !X0 | X0 | ~0 | Set vl to VLMAX
+// X0 | X0 | Value in vl | Keep existing vl
+def NoX0 : SDNodeXForm<undef, [{
+  SDLoc DL(N);
+  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
+    if (C->isNullValue()) {
+ return SDValue(CurDAG->getMachineNode(RISCV::ADDI, DL, MVT::i64,
+ CurDAG->getRegister(RISCV::X0, MVT::i64),
+ CurDAG->getTargetConstant(0, DL, MVT::i64)), 0);
+ }
+ }
+
+ return SDValue(N, 0);
+}]>;
+
//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//
@@ -50,6 +70,18 @@
// List of EEW.
defvar EEWList = [8, 16, 32, 64];
+// We only model FPR32 for V instructions in RISCVInstrInfoV.td.
+// FP16/FP32/FP64 registers alias each other. Converting FPR16 and FPR64
+// to FPR32 for V instructions is enough.
+class ToFPR32<ValueType type, DAGOperand operand, string name> {
+  dag ret = !cond(!eq(!cast<string>(operand), !cast<string>(FPR64)):
+                      (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32),
+                  !eq(!cast<string>(operand), !cast<string>(FPR16)):
+ (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16),
+ !eq(1, 1):
+ !dag(type, [operand], [name]));
+}
+
//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//
@@ -122,6 +154,14 @@
let PrimaryKeyName = "getPseudoInfo";
}
+def RISCVVIntrinsicsTable : GenericTable {
+ let FilterClass = "RISCVVIntrinsic";
+ let CppTypeName = "RISCVVIntrinsicInfo";
+ let Fields = ["IntrinsicID", "ExtendOperand"];
+ let PrimaryKey = ["IntrinsicID"];
+ let PrimaryKeyName = "getRISCVVIntrinsicInfo";
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//
@@ -208,40 +248,156 @@
defm "" : VPseudoBinaryV_VI;
}
+multiclass VPseudoBinary_VV_VX {
+ defm "" : VPseudoBinaryV_VV;
+ defm "" : VPseudoBinaryV_VX;
+}
+
+multiclass VPseudoBinary_VX_VI {
+ defm "" : VPseudoBinaryV_VX;
+ defm "" : VPseudoBinaryV_VI;
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the different patterns.
//===----------------------------------------------------------------------===//
+class VPatBinarySDNode<SDNode vop,
+                       string instruction_name,
+                       ValueType result_type,
+                       ValueType op_type,
+                       int sew,
+                       LMULInfo vlmul,
+                       VReg op_reg_class> :
+  Pat<(result_type (vop
+                    (op_type op_reg_class:$rs1),
+                    (op_type op_reg_class:$rs2))),
+      (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                    op_reg_class:$rs1,
+                    op_reg_class:$rs2,
+                    VLMax, sew)>;
+
+multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
+{
+  foreach vti = AllIntegerVectors in
+    def : VPatBinarySDNode<vop, instruction_name,
+                           vti.Vector, vti.Vector, vti.SEW,
+                           vti.LMul, vti.RegClass>;
+}
-multiclass pat_vop_binary<SDNode vop, string instruction_name, ValueType result_type,
-                          ValueType op_type, ValueType mask_type, int sew, LMULInfo vlmul, VReg RetClass, VReg op_reg_class>
+class VPatBinary<string intrinsic_name,
+                 string inst,
+                 string kind,
+                 ValueType result_type,
+                 ValueType op1_type,
+                 ValueType op2_type,
+                 int sew,
+                 LMULInfo vlmul,
+                 VReg op1_reg_class,
+                 DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (i64 GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                    (op1_type op1_reg_class:$rs1),
+                    ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                    (NoX0 GPR:$vl), sew)>;
+
+class VPatBinaryMask<string intrinsic_name,
+                     string inst,
+                     string kind,
+                     ValueType result_type,
+                     ValueType op1_type,
+                     ValueType op2_type,
+                     ValueType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     VReg result_reg_class,
+                     VReg op1_reg_class,
+                     DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                    (result_type result_reg_class:$merge),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (mask_type V0),
+                    (i64 GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                    (result_type result_reg_class:$merge),
+                    (op1_type op1_reg_class:$rs1),
+                    ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                    (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
+multiclass VPatBinary<string intrinsic, string inst, string kind,
+                      ValueType result_type, ValueType op1_type, ValueType op2_type,
+                      ValueType mask_type, int sew, LMULInfo vlmul,
+                      VReg result_reg_class, VReg op1_reg_class, DAGOperand op2_kind>
{
-  defvar instruction = !cast<Instruction>(instruction_name#"_VV_"# vlmul.MX);
- def : Pat<(result_type (vop
- (op_type op_reg_class:$rs1),
- (op_type op_reg_class:$rs2))),
- (instruction (result_type (IMPLICIT_DEF)),
- op_reg_class:$rs1,
- op_reg_class:$rs2,
- (mask_type zero_reg),
- VLMax, sew)>;
-}
-
-multiclass pat_vop_binary_common<SDNode vop, string instruction_name,
-                                 list<VTypeInfo> vtilist>
+  def : VPatBinary<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                   sew, vlmul, op1_reg_class, op2_kind>;
+  def : VPatBinaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                       mask_type, sew, vlmul, result_reg_class, op1_reg_class, op2_kind>;
+}
+
+multiclass VPatBinaryV_VV<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinary<intrinsic, instruction, "VV",
+                      vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_VX<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinary<intrinsic, instruction, "VX",
+                      vti.Vector, vti.Vector, i64, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_VI<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinary<intrinsic, instruction, "VI",
+                      vti.Vector, vti.Vector, i64, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, simm5>;
+}
+
+multiclass VPatBinary_VV_VX_VI<string intrinsic, string instruction>
{
- foreach vti = vtilist in
-    defm : pat_vop_binary<vop, instruction_name, vti.Vector, vti.Vector,
-                          vti.Mask, vti.SEW, vti.LMul, vti.RegClass, vti.RegClass>;
+ defm "" : VPatBinaryV_VV;
+ defm "" : VPatBinaryV_VX;
+ defm "" : VPatBinaryV_VI;
+}
+
+multiclass VPatBinary_VV_VX<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VV<intrinsic, instruction>;
+  defm "" : VPatBinaryV_VX<intrinsic, instruction>;
+}
+
+multiclass VPatBinary_VX_VI<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VX<intrinsic, instruction>;
+  defm "" : VPatBinaryV_VI<intrinsic, instruction>;
+}
//===----------------------------------------------------------------------===//
@@ -353,8 +509,18 @@
// Pseudo instructions.
defm PseudoVADD : VPseudoBinary_VV_VX_VI;
+defm PseudoVSUB : VPseudoBinary_VV_VX;
+defm PseudoVRSUB : VPseudoBinary_VX_VI;
+
+//===----------------------------------------------------------------------===//
+// Patterns.
+//===----------------------------------------------------------------------===//
// Whole-register vector patterns.
-defm "" : pat_vop_binary_common;
+defm "" : VPatBinarySDNode;
+
+defm "" : VPatBinary_VV_VX_VI<"int_riscv_vadd", "PseudoVADD">;
+defm "" : VPatBinary_VV_VX<"int_riscv_vsub", "PseudoVSUB">;
+defm "" : VPatBinary_VX_VI<"int_riscv_vrsub", "PseudoVRSUB">;
} // Predicates = [HasStdExtV]
diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -413,6 +413,20 @@
} // end namespace RISCVVPseudosTable
+namespace RISCVVIntrinsicsTable {
+
+struct RISCVVIntrinsicInfo {
+ unsigned int IntrinsicID;
+ unsigned int ExtendedOperand;
+};
+
+using namespace RISCV;
+
+#define GET_RISCVVIntrinsicsTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // end namespace RISCVVIntrinsicsTable
+
} // namespace llvm
#endif
diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
@@ -14,6 +14,7 @@
#include "RISCVBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -94,6 +95,13 @@
} // namespace RISCVFeatures
+namespace RISCVVIntrinsicsTable {
+
+#define GET_RISCVVIntrinsicsTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace RISCVVIntrinsicsTable
+
namespace RISCVVPseudosTable {
#define GET_RISCVVPseudosTable_IMPL
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll
@@ -0,0 +1,2392 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @test(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: test
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vadd.vv v16, v16, v17
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> undef,
+    i64 undef)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> undef,
+    i64 undef)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> undef,
+    i64 undef)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+