diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -169,6 +169,13 @@
   // This builtin has a granted vector length parameter in the last position.
   bit HasVL = true;
 
+  // Normally, an intrinsic has a policy argument if it is masked and no
+  // policy argument if it is unmasked. When HasPolicy is false, the
+  // intrinsic has no policy argument regardless of whether it is masked or
+  // unmasked. For example, when the result is a mask type or a scalar type,
+  // there is no need to specify the policy.
+  bit HasPolicy = true;
+
   // This builtin supports non-masked function overloading api.
   // All masked operations support overloading api.
   bit HasNoMaskedOverloaded = true;
@@ -1647,3 +1654,14 @@
     }
   }
 }
+
+class RVVHeader
+{
+  code HeaderCode;
+}
+
+let HeaderCode = [{
+#define VE_TAIL_UNDISTURBED 0
+#define VE_TAIL_AGNOSTIC 1
+}] in
+def policy : RVVHeader;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
@@ -0,0 +1,44 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_tu (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_ta (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
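
As a usage sketch (not part of the patch — the function name and scenario are illustrative, and it needs a toolchain with this patch applied), the new _mt intrinsics let a caller pick the tail policy explicitly, while the plain _m form keeps its current tail-agnostic behavior:

#include <riscv_vector.h>

// Masked add that preserves the tail elements of vd: the policy operand
// VE_TAIL_UNDISTURBED (0) keeps vd's tail, VE_TAIL_AGNOSTIC (1) lets the
// hardware overwrite it.
vint8m1_t masked_add_keep_tail(vbool8_t mask, vint8m1_t vd,
                               vint8m1_t a, vint8m1_t b, size_t vl) {
  return vadd_vv_i8m1_mt(mask, vd, a, b, vl, VE_TAIL_UNDISTURBED);
}
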
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -156,6 +156,7 @@
   bool IsMask;
   bool HasMaskedOffOperand;
   bool HasVL;
+  bool HasPolicy;
   bool HasNoMaskedOverloaded;
   bool HasAutoDef; // There is automiatic definition in header
   std::string ManualCodegen;
@@ -169,8 +170,9 @@
 public:
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
                StringRef IRName, bool HasSideEffects, bool IsMask,
-               bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
-               bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
+               bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
+               bool HasNoMaskedOverloaded, bool HasAutoDef,
+               StringRef ManualCodegen, const RVVTypes &Types,
               const std::vector<int64_t> &IntrinsicTypes,
               StringRef RequiredExtension);
   ~RVVIntrinsic() = default;
@@ -180,6 +182,7 @@
   bool hasSideEffects() const { return HasSideEffects; }
   bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
   bool hasVL() const { return HasVL; }
+  bool hasPolicy() const { return HasPolicy; }
   bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
@@ -195,6 +198,9 @@
   // init the RVVIntrinsic ID and IntrinsicTypes.
   void emitCodeGenSwitchBody(raw_ostream &o) const;
 
+  // Emit the define macros for mask intrinsics using _mt intrinsics.
+  void emitIntrinsicMaskMacro(raw_ostream &o) const;
+
   // Emit the macros for mapping C/C++ intrinsic function to builtin functions.
   void emitIntrinsicMacro(raw_ostream &o) const;
 
@@ -227,6 +233,8 @@
 private:
   /// Create all intrinsics and add them to \p Out
   void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+  /// Create Headers and write them to \p OS
+  void createRVVHeaders(raw_ostream &OS);
   /// Compute output and input types by applying different config (basic type
   /// and LMUL with type transformers). It also record result of type in legal
   /// or illegal set to avoid compute the same config again. The result maybe
@@ -631,7 +639,7 @@
     ScalarType = ScalarTypeKind::SignedLong;
     break;
   default:
-    PrintFatalError("Illegal primitive type transformers!");
+    PrintFatalError("Illegal primitive type transformers: " + PType);
   }
   Transformer = Transformer.drop_back();
 
@@ -745,15 +753,15 @@
 RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            StringRef NewMangledName, StringRef IRName,
                            bool HasSideEffects, bool IsMask,
-                           bool HasMaskedOffOperand, bool HasVL,
+                           bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            StringRef RequiredExtension)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
-      HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
-      ManualCodegen(ManualCodegen.str()) {
+      HasPolicy(HasPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded),
+      HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()) {
 
   // Init Name and MangledName
   Name = NewName.str();
@@ -765,6 +773,8 @@
     Name += "_" + Suffix.str();
   if (IsMask) {
     Name += "_m";
+    if (HasPolicy)
+      Name += "t";
   }
   // Init RISC-V extensions
   for (const auto &T : OutInTypes) {
@@ -813,7 +823,10 @@
 
   if (isMask()) {
     if (hasVL()) {
-      OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
+      if (hasPolicy())
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n";
+      else
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
     } else {
       OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
     }
@@ -853,6 +866,24 @@
   OS << ")\n";
 }
 
+void RVVIntrinsic::emitIntrinsicMaskMacro(raw_ostream &OS) const {
+  OS << "#define " << getName().drop_back() << "(";
+  if (!InputTypes.empty()) {
+    ListSeparator LS;
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "op" << i;
+  }
+  OS << ") \\\n";
+  OS << "__builtin_rvv_" << getName() << "(";
+  ListSeparator LS;
+  if (!InputTypes.empty()) {
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+  }
+  OS << LS << "(size_t)VE_TAIL_AGNOSTIC";
+  OS << ")\n";
+}
+
 void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
   OS << "__attribute__((clang_builtin_alias(";
   OS << "__builtin_rvv_" << getName() << ")))\n";
@@ -898,6 +929,8 @@
   OS << "extern \"C\" {\n";
   OS << "#endif\n\n";
 
+  createRVVHeaders(OS);
+
   std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
   createRVVIntrinsics(Defs);
 
@@ -965,6 +998,12 @@
     Inst.emitIntrinsicMacro(OS);
   });
 
+  // Use _mt to implement _m intrinsics.
+  emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+    if (Inst.isMask() && Inst.hasPolicy())
+      Inst.emitIntrinsicMaskMacro(OS);
+  });
+
   OS << "#define __riscv_v_intrinsic_overloading 1\n";
 
   // Print Overloaded APIs
@@ -1066,6 +1105,7 @@
   bool HasMask = R->getValueAsBit("HasMask");
   bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
   bool HasVL = R->getValueAsBit("HasVL");
+  bool HasPolicy = R->getValueAsBit("HasPolicy");
   bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
   bool HasSideEffects = R->getValueAsBit("HasSideEffects");
   std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
@@ -1104,6 +1144,10 @@
     ProtoMaskSeq.push_back("z");
   }
 
+  if (HasPolicy) {
+    ProtoMaskSeq.push_back("Kz");
+  }
+
   // Create Intrinsics for each type and LMUL.
   for (char I : TypeRange) {
     for (int Log2LMUL : Log2LMULList) {
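
For one instance, the macro emitted by emitIntrinsicMaskMacro above would look roughly like this hand-expansion (a sketch — the exact text comes from the emitter, and the opN names are positional):

#define vadd_vv_i8m1_m(op0, op1, op2, op3, op4) \
__builtin_rvv_vadd_vv_i8m1_mt((vbool8_t)(op0), (vint8m1_t)(op1), \
                              (vint8m1_t)(op2), (vint8m1_t)(op3), \
                              (size_t)(op4), (size_t)VE_TAIL_AGNOSTIC)

The macro name drops the trailing 't' of the _mt builtin, so existing _m callers transparently keep tail-agnostic behavior.
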
@@ -1116,7 +1160,7 @@
         // Create a non-mask intrinsic
         Out.push_back(std::make_unique<RVVIntrinsic>(
             Name, SuffixStr, MangledName, IRName, HasSideEffects,
-            /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
+            /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
             HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
             Types.getValue(), IntrinsicTypes, RequiredExtension));
         if (HasMask) {
@@ -1125,7 +1169,7 @@
               computeTypes(I, Log2LMUL, ProtoMaskSeq);
           Out.push_back(std::make_unique<RVVIntrinsic>(
               Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
-              /*IsMask=*/true, HasMaskedOffOperand, HasVL,
+              /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
              HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
              MaskTypes.getValue(), IntrinsicTypes, RequiredExtension));
        }
@@ -1134,6 +1178,15 @@
   }
 }
 
+void RVVEmitter::createRVVHeaders(raw_ostream &OS) {
+  std::vector<Record *> RVVHeaders =
+      Records.getAllDerivedDefinitions("RVVHeader");
+  for (auto *R : RVVHeaders) {
+    StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+    OS << HeaderCodeStr.str();
+  }
+}
+
 Optional<RVVTypes> RVVEmitter::computeTypes(BasicType BT, int Log2LMUL,
                                             ArrayRef<std::string> PrototypeSeq) {
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -338,6 +338,14 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 3;
   }
+  class RISCVBinaryAAXMaskTA
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<2>],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let SplatOperand = 3;
+  }
   // For destination vector type is the same as first source vector. The
   // second source operand must match the destination type or be an XLen scalar.
   // Input: (vector_in, vector_in/scalar_in, vl)
@@ -817,6 +825,10 @@
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
   }
+  multiclass RISCVBinaryAAXWithPolicy {
+    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskTA;
+  }
   // Like RISCVBinaryAAX, but the second operand is used a shift amount so it
   // must be a vector or an XLen scalar.
   multiclass RISCVBinaryAAShift {
@@ -960,7 +972,7 @@
   defm vamominu : RISCVAMO;
   defm vamomaxu : RISCVAMO;
 
-  defm vadd : RISCVBinaryAAX;
+  defm vadd : RISCVBinaryAAXWithPolicy;
   defm vsub : RISCVBinaryAAX;
   defm vrsub : RISCVBinaryAAX;
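
Together with the "Kz" suffix appended to the masked prototype on the clang side ('K' marking a constant operand, 'z' a size_t), the trailing LLVMMatchType<2> above gives every masked vadd builtin a compile-time policy operand. For the i8m1 instance, the user-visible declaration would look roughly like the following (a sketch; the real header binds the builtin through clang_builtin_alias rather than a plain prototype):

vint8m1_t vadd_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff,
                          vint8m1_t op1, vint8m1_t op2,
                          size_t vl, size_t policy);
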
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -76,6 +76,9 @@
   // explicit operand. Used by RVV Pseudos.
   HasVLOpShift = HasSEWOpShift + 1,
   HasVLOpMask = 1 << HasVLOpShift,
+
+  HasPolicyOpShift = HasVLOpShift + 1,
+  HasPolicyOpMask = 1 << HasPolicyOpShift,
 };
 
 // Match with the definitions in RISCVInstrFormatsV.td
@@ -132,6 +135,10 @@
   return TSFlags & HasVLOpMask;
 }
 
+static inline bool hasPolicyOp(uint64_t TSFlags) {
+  return TSFlags & HasPolicyOpMask;
+}
+
 // RISC-V Specific Machine Operand Flags
 enum {
   MO_None = 0,
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -365,7 +365,9 @@
 
   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
-  unsigned Log2SEW = MI.getOperand(NumOperands - 1).getImm();
+  unsigned Log2SEWIndex =
+      RISCVII::hasPolicyOp(TSFlags) ? NumOperands - 2 : NumOperands - 1;
+  unsigned Log2SEW = MI.getOperand(Log2SEWIndex).getImm();
   // A Log2SEW of 0 is an operation on mask registers only.
   bool MaskRegOp = Log2SEW == 0;
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
@@ -393,6 +395,12 @@
     }
   }
 
+  // If the instruction has a policy operand, use it.
+  if (RISCVII::hasPolicyOp(TSFlags)) {
+    const MachineOperand &Op = MI.getOperand(NumOperands - 1);
+    TailAgnostic = Op.getImm() & 0x1;
+  }
+
   if (RISCVII::hasVLOp(TSFlags)) {
     const MachineOperand &VLOp = MI.getOperand(MI.getNumExplicitOperands() - 2);
     if (VLOp.isImm())
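
In rough C terms, the tail-policy selection the pass now performs amounts to the following paraphrase (illustrative only — the real code queries MachineInstr operands and TSFlags):

#include <stdbool.h>
#include <stdint.h>

// TSFlags bit 16 marks a trailing policy operand (HasPolicyOp above).
static bool has_policy_op(uint64_t tsflags) { return tsflags & (1ull << 16); }

// ops holds the trailing immediates of a pseudo: (..., sew) or
// (..., sew, policy). Returns true when the vsetvli should use "ta",
// false for "tu".
static bool tail_agnostic(uint64_t tsflags, const int64_t *ops, int nops,
                          bool default_ta) {
  bool ta = default_ta;        // whatever the pass inferred beforehand
  if (has_policy_op(tsflags))
    ta = ops[nops - 1] & 0x1;  // low bit: 1 = agnostic, 0 = undisturbed
  return ta;
}
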
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -178,6 +178,9 @@
 
   bit HasVLOp = 0;
   let TSFlags{15} = HasVLOp;
+
+  bit HasPolicyOp = false;
+  let TSFlags{16} = HasPolicyOp;
 }
 
 // Pseudo instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -975,6 +975,26 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoBinaryMaskWithPolicy<VReg RetClass,
+                                  RegisterClass Op1Class,
+                                  DAGOperand Op2Class,
+                                  string Constraint> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasPolicyOp = true;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 // Like VPseudoBinaryMask, but output can be V0.
 class VPseudoBinaryMOutMask<VReg RetClass,
 
+multiclass VPseudoBinaryWithPolicy<VReg RetClass,
+                                   VReg Op1Class,
+                                   DAGOperand Op2Class,
+                                   LMULInfo MInfo,
+                                   string Constraint = ""> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
+                                             Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskWithPolicy<RetClass,
+                                       Op1Class, Op2Class, Constraint>;
+  }
+}
+
 multiclass VPseudoBinaryM<VReg RetClass,
 
 multiclass VPseudoBinaryV_VV<string Constraint = ""> {
   foreach m = MxList.m in
     defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
 }
 
+multiclass VPseudoBinaryV_VV_WithPolicy<string Constraint = ""> {
+  foreach m = MxList.m in
+    defm _VV : VPseudoBinaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
+                                       Constraint>;
+}
+
 multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
   foreach m = MxList.m in {
     foreach sew = EEWList in {
@@ -1561,6 +1599,11 @@
 multiclass VPseudoBinaryV_VX<string Constraint = ""> {
   foreach m = MxList.m in
     defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
 }
 
+multiclass VPseudoBinaryV_VX_WithPolicy<string Constraint = ""> {
+  foreach m = MxList.m in
+    defm "_VX" : VPseudoBinaryWithPolicy<m.vrclass, m.vrclass, GPR, m,
+                                         Constraint>;
+}
+
 multiclass VPseudoBinaryV_VF<string Constraint = ""> {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
@@ -1573,6 +1616,11 @@
 multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
   foreach m = MxList.m in
     defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
 }
 
+multiclass VPseudoBinaryV_VI_WithPolicy<Operand ImmType = simm5,
+                                        string Constraint = ""> {
+  foreach m = MxList.m in
+    defm _VI : VPseudoBinaryWithPolicy<m.vrclass, m.vrclass, ImmType, m,
+                                       Constraint>;
+}
+
 multiclass VPseudoBinaryM_MM {
   foreach m = MxList.m in
     let VLMul = m.value in {
@@ -1801,6 +1849,12 @@
   defm "" : VPseudoBinaryV_VI<ImmType, Constraint>;
 }
 
+multiclass VPseudoBinaryV_VV_VX_VI_WithPolicy<Operand ImmType = simm5,
+                                              string Constraint = ""> {
+  defm "" : VPseudoBinaryV_VV_WithPolicy<Constraint>;
+  defm "" : VPseudoBinaryV_VX_WithPolicy<Constraint>;
+  defm "" : VPseudoBinaryV_VI_WithPolicy<ImmType, Constraint>;
+}
+
 multiclass VPseudoBinaryV_VV_VX<string Constraint = ""> {
   defm "" : VPseudoBinaryV_VV<Constraint>;
   defm "" : VPseudoBinaryV_VX<Constraint>;
@@ -2309,6 +2363,28 @@
                    (op2_type op2_kind:$rs2),
                    (mask_type V0), GPR:$vl, sew)>;
 
+class VPatBinaryMaskWithPolicy<string intrinsic_name,
+                               string inst,
+                               ValueType result_type,
+                               ValueType op1_type,
+                               ValueType op2_type,
+                               ValueType mask_type,
+                               int sew,
+                               VReg result_reg_class,
+                               VReg op1_reg_class,
+                               DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   VLOpFrag, (XLenVT uimm5:$policy))),
+                   (!cast<Instruction>(inst#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0), GPR:$vl, sew, (XLenVT uimm5:$policy))>;
+
 // Same as above but source operands are swapped.
 class VPatBinaryMaskSwapped<string intrinsic_name,
 
+multiclass VPatBinaryWithPolicy<string intrinsic,
+                                string inst,
+                                ValueType result_type,
+                                ValueType op1_type,
+                                ValueType op2_type,
+                                ValueType mask_type,
+                                int sew,
+                                VReg result_reg_class,
+                                VReg op1_reg_class,
+                                DAGOperand op2_kind>
+{
+  def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
+                         sew, op1_reg_class, op2_kind>;
+  def : VPatBinaryMaskWithPolicy<intrinsic, inst, result_type, op1_type,
+                                 op2_type, mask_type, sew, result_reg_class,
+                                 op1_reg_class, op2_kind>;
+}
+
 multiclass VPatBinarySwapped<string intrinsic,
 
+multiclass VPatBinaryV_VV_WithPolicy<string intrinsic, string instruction,
+                                     list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatBinaryWithPolicy<intrinsic, instruction,
+                                vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                                vti.Log2SEW, vti.RegClass,
+                                vti.RegClass, vti.RegClass>;
+}
+
 multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
@@ -2694,6 +2797,17 @@
   }
 }
 
+multiclass VPatBinaryV_VX_WithPolicy<string intrinsic, string instruction,
+                                     list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in {
+    defvar kind = "V"#vti.ScalarSuffix;
+    defm : VPatBinaryWithPolicy<intrinsic, instruction#"_"#kind,
+                                vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
+                                vti.Log2SEW, vti.RegClass,
+                                vti.RegClass, vti.ScalarRegClass>;
+  }
+}
+
 multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -2712,6 +2826,15 @@
                     vti.RegClass, imm_type>;
 }
 
+multiclass VPatBinaryV_VI_WithPolicy<string intrinsic, string instruction,
+                                     list<VTypeInfo> vtilist,
+                                     Operand imm_type> {
+  foreach vti = vtilist in
+    defm : VPatBinaryWithPolicy<intrinsic, instruction # "_VI",
+                                vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                                vti.Log2SEW, vti.RegClass,
+                                vti.RegClass, imm_type>;
+}
+
 multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
   foreach mti = AllMasks in
     def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
 
       VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
 
+multiclass VPatBinaryV_VV_VX_VI_WithPolicy<string intrinsic, string instruction,
+                                           list<VTypeInfo> vtilist,
+                                           Operand ImmType = simm5>
+    : VPatBinaryV_VV_WithPolicy<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VX_WithPolicy<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VI_WithPolicy<intrinsic, instruction, vtilist, ImmType>;
+
 multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
     : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
@@ -3398,7 +3527,7 @@
 //===----------------------------------------------------------------------===//
 // 12.1. Vector Single-Width Integer Add and Subtract
 //===----------------------------------------------------------------------===//
-defm PseudoVADD : VPseudoBinaryV_VV_VX_VI;
+defm PseudoVADD : VPseudoBinaryV_VV_VX_VI_WithPolicy;
 defm PseudoVSUB : VPseudoBinaryV_VV_VX;
 defm PseudoVRSUB : VPseudoBinaryV_VX_VI;
 
@@ -3446,7 +3575,7 @@
                                (NegImm simm5_plus1:$rs2),
                                (vti.Mask V0),
                                GPR:$vl,
-                               vti.Log2SEW)>;
+                               vti.Log2SEW, 1)>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -3958,7 +4087,7 @@
 //===----------------------------------------------------------------------===//
 // 12.1. Vector Single-Width Integer Add and Subtract
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
+defm : VPatBinaryV_VV_VX_VI_WithPolicy<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
 defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
 defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -269,6 +269,36 @@
                  VMV0:$vm, GPR:$vl, sew)>;
 }
 
+multiclass VPatBinaryVL_VV_WithPolicy<SDNode vop,
+                                      string instruction_name,
+                                      ValueType result_type,
+                                      ValueType op_type,
+                                      ValueType mask_type,
+                                      int sew,
+                                      LMULInfo vlmul,
+                                      VReg op_reg_class> {
+  def : Pat<(result_type (vop
+                          (op_type op_reg_class:$rs1),
+                          (op_type op_reg_class:$rs2),
+                          (mask_type true_mask),
+                          VLOpFrag)),
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                 op_reg_class:$rs1,
+                 op_reg_class:$rs2,
+                 GPR:$vl, sew)>;
+  def : Pat<(result_type (vop
+                          (op_type op_reg_class:$rs1),
+                          (op_type op_reg_class:$rs2),
+                          (mask_type VMV0:$vm),
+                          VLOpFrag)),
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_MASK")
+                 (result_type (IMPLICIT_DEF)),
+                 op_reg_class:$rs1,
+                 op_reg_class:$rs2,
+                 VMV0:$vm, GPR:$vl, sew, 0)>;
+}
+
 multiclass VPatBinaryVL_XI<SDNode vop,
 
+multiclass VPatBinaryVL_XI_WithPolicy<SDNode vop,
+                                      string instruction_name,
+                                      string suffix,
+                                      ValueType result_type,
+                                      ValueType vop_type,
+                                      ValueType mask_type,
+                                      int sew,
+                                      LMULInfo vlmul,
+                                      VReg vop_reg_class,
+                                      ComplexPattern SplatPatKind,
+                                      DAGOperand xop_kind> {
+  def : Pat<(result_type (vop
+                          (vop_type vop_reg_class:$rs1),
+                          (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
+                          (mask_type true_mask),
+                          VLOpFrag)),
+            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
+                 vop_reg_class:$rs1,
+                 xop_kind:$rs2,
+                 GPR:$vl, sew)>;
+  def : Pat<(result_type (vop
+                          (vop_type vop_reg_class:$rs1),
+                          (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
+                          (mask_type VMV0:$vm),
+                          VLOpFrag)),
+            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK")
+                 (result_type (IMPLICIT_DEF)),
+                 vop_reg_class:$rs1,
+                 xop_kind:$rs2,
+                 VMV0:$vm, GPR:$vl, sew, 0)>;
+}
+
 multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
   foreach vti = AllIntegerVectors in {
     defm : VPatBinaryVL_VV<vop, instruction_name,
                            vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                            vti.LMul, vti.RegClass>;
     defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                            vti.LMul, vti.RegClass, SplatPat, GPR>;
   }
 }
 
+multiclass VPatBinaryVL_VV_VX_WithPolicy<SDNode vop, string instruction_name> {
+  foreach vti = AllIntegerVectors in {
+    defm : VPatBinaryVL_VV_WithPolicy<vop, instruction_name,
+                                      vti.Vector, vti.Vector, vti.Mask,
+                                      vti.Log2SEW, vti.LMul, vti.RegClass>;
+    defm : VPatBinaryVL_XI_WithPolicy<vop, instruction_name, "VX",
+                                      vti.Vector, vti.Vector, vti.Mask,
+                                      vti.Log2SEW, vti.LMul, vti.RegClass,
+                                      SplatPat, GPR>;
+  }
+}
+
 multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
                                  Operand ImmType = simm5>
     : VPatBinaryVL_VV_VX<vop, instruction_name> {
@@ -326,6 +401,18 @@
   }
 }
 
+multiclass VPatBinaryVL_VV_VX_VI_WithPolicy<SDNode vop, string instruction_name,
+                                            Operand ImmType = simm5>
+    : VPatBinaryVL_VV_VX_WithPolicy<vop, instruction_name> {
+  foreach vti = AllIntegerVectors in {
+    defm : VPatBinaryVL_XI_WithPolicy<vop, instruction_name, "VI",
+                                      vti.Vector, vti.Vector, vti.Mask,
+                                      vti.Log2SEW, vti.LMul, vti.RegClass,
+                                      !cast<ComplexPattern>(SplatPat#_#ImmType),
+                                      ImmType>;
+  }
+}
+
 class VPatBinaryVL_VF<SDNode vop,
 
-defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
+defm : VPatBinaryVL_VV_VX_VI_WithPolicy<riscv_add_vl, "PseudoVADD">;
 defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
 // Handle VRSUB specially since it's the only integer binary op with reversed
 // pattern operands
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64, i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_tu(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_tu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 0)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_ta(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_ta:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 8 x i8> %a
+}