diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td --- a/llvm/lib/Target/RISCV/RISCV.td +++ b/llvm/lib/Target/RISCV/RISCV.td @@ -149,6 +149,9 @@ AssemblerPredicate<(all_of FeatureStdExtV), "'V' (Vector Instructions)">; +def HasVInstructions : Predicate<"Subtarget->hasVInstructions()">; +def HasVInstructionsF32 : Predicate<"Subtarget->hasVInstructionsF32()">; + def FeatureStdExtZvlsseg : SubtargetFeature<"experimental-zvlsseg", "HasStdExtZvlsseg", "true", "'Zvlsseg' (Vector segment load/store instructions)", diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -866,7 +866,7 @@ } static bool hasRVVSpillWithFIs(MachineFunction &MF, const RISCVInstrInfo &TII) { - if (!MF.getSubtarget().hasStdExtV()) + if (!MF.getSubtarget().hasVInstructions()) return false; return any_of(MF, [&TII](const MachineBasicBlock &MBB) { return any_of(MBB, [&TII](const MachineInstr &MI) { diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp @@ -431,7 +431,7 @@ auto &TPC = getAnalysis(); auto &TM = TPC.getTM(); ST = &TM.getSubtarget(F); - if (!ST->hasStdExtV() || !ST->useRVVForFixedLengthVectors()) + if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors()) return false; TLI = ST->getTargetLowering(); diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -935,7 +935,7 @@ case Intrinsic::riscv_vsetvli: case Intrinsic::riscv_vsetvlimax: { - if (!Subtarget->hasStdExtV()) + if (!Subtarget->hasVInstructions()) break; bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax; diff --git 
a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -109,7 +109,7 @@ static const MVT::SimpleValueType F64VecVTs[] = { MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64}; - if (Subtarget.hasStdExtV()) { + if (Subtarget.hasVInstructions()) { auto addRegClassForRVV = [this](MVT VT) { unsigned Size = VT.getSizeInBits().getKnownMinValue(); assert(Size <= 512 && isPowerOf2_32(Size)); @@ -128,18 +128,22 @@ for (MVT VT : BoolVecVTs) addRegClassForRVV(VT); - for (MVT VT : IntVecVTs) + for (MVT VT : IntVecVTs) { + if (VT.getVectorElementType() == MVT::i64 && + !Subtarget.hasVInstructionsI64()) + continue; addRegClassForRVV(VT); + } - if (Subtarget.hasStdExtZfh()) + if (Subtarget.hasVInstructionsF16()) for (MVT VT : F16VecVTs) addRegClassForRVV(VT); - if (Subtarget.hasStdExtF()) + if (Subtarget.hasVInstructionsF32()) for (MVT VT : F32VecVTs) addRegClassForRVV(VT); - if (Subtarget.hasStdExtD()) + if (Subtarget.hasVInstructionsF64()) for (MVT VT : F64VecVTs) addRegClassForRVV(VT); @@ -418,7 +422,7 @@ setBooleanContents(ZeroOrOneBooleanContent); - if (Subtarget.hasStdExtV()) { + if (Subtarget.hasVInstructions()) { setBooleanVectorContents(ZeroOrOneBooleanContent); setOperationAction(ISD::VSCALE, XLenVT, Custom); @@ -522,6 +526,10 @@ } for (MVT VT : IntVecVTs) { + if (VT.getVectorElementType() == MVT::i64 && + !Subtarget.hasVInstructionsI64()) + continue; + setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); @@ -684,18 +692,18 @@ } }; - if (Subtarget.hasStdExtZfh()) + if (Subtarget.hasVInstructionsF16()) for (MVT VT : F16VecVTs) SetCommonVFPActions(VT); for (MVT VT : F32VecVTs) { - if (Subtarget.hasStdExtF()) + if (Subtarget.hasVInstructionsF32()) SetCommonVFPActions(VT); SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs); } for (MVT VT : F64VecVTs) { - if 
(Subtarget.hasStdExtD()) + if (Subtarget.hasVInstructionsF64()) SetCommonVFPActions(VT); SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs); SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs); @@ -925,7 +933,7 @@ setTargetDAGCombine(ISD::XOR); setTargetDAGCombine(ISD::ANY_EXTEND); setTargetDAGCombine(ISD::ZERO_EXTEND); - if (Subtarget.hasStdExtV()) { + if (Subtarget.hasVInstructions()) { setTargetDAGCombine(ISD::FCOPYSIGN); setTargetDAGCombine(ISD::MGATHER); setTargetDAGCombine(ISD::MSCATTER); @@ -943,7 +951,7 @@ EVT VT) const { if (!VT.isVector()) return getPointerTy(DL); - if (Subtarget.hasStdExtV() && + if (Subtarget.hasVInstructions() && (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors())) return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount()); return VT.changeVectorElementTypeToInteger(); @@ -1089,7 +1097,7 @@ Instruction *I, SmallVectorImpl &Ops) const { using namespace llvm::PatternMatch; - if (!I->getType()->isVectorTy() || !Subtarget.hasStdExtV()) + if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions()) return false; auto IsSinker = [&](Instruction *I, int Operand) { @@ -1347,15 +1355,18 @@ return true; if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) || - ScalarTy->isIntegerTy(32) || ScalarTy->isIntegerTy(64)) + ScalarTy->isIntegerTy(32)) return true; + if (ScalarTy->isIntegerTy(64)) + return Subtarget.hasVInstructionsI64(); + if (ScalarTy->isHalfTy()) - return Subtarget.hasStdExtZfh(); + return Subtarget.hasVInstructionsF16(); if (ScalarTy->isFloatTy()) - return Subtarget.hasStdExtF(); + return Subtarget.hasVInstructionsF32(); if (ScalarTy->isDoubleTy()) - return Subtarget.hasStdExtD(); + return Subtarget.hasVInstructionsF64(); return false; } @@ -1391,18 +1402,21 @@ case MVT::i8: case MVT::i16: case MVT::i32: + break; case MVT::i64: + if (!Subtarget.hasVInstructionsI64()) + return false; break; case MVT::f16: - if (!Subtarget.hasStdExtZfh()) + if (!Subtarget.hasVInstructionsF16()) return false; break; 
case MVT::f32: - if (!Subtarget.hasStdExtF()) + if (!Subtarget.hasVInstructionsF32()) return false; break; case MVT::f64: - if (!Subtarget.hasStdExtD()) + if (!Subtarget.hasVInstructionsF64()) return false; break; } @@ -3764,7 +3778,7 @@ Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) && "Unexpected opcode"); - if (!Subtarget.hasStdExtV()) + if (!Subtarget.hasVInstructions()) return SDValue(); bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN; @@ -8062,7 +8076,7 @@ } assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || - (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) && + (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) && "Expected an XLenVT or vector types at this stage"); if (Reg) { @@ -8098,7 +8112,7 @@ FunctionType *FType = MF.getFunction().getFunctionType(); Optional FirstMaskArgument; - if (Subtarget.hasStdExtV()) + if (Subtarget.hasVInstructions()) FirstMaskArgument = preAssignMask(Ins); for (unsigned i = 0; i != NumArgs; ++i) { @@ -8129,7 +8143,7 @@ unsigned NumArgs = Outs.size(); Optional FirstMaskArgument; - if (Subtarget.hasStdExtV()) + if (Subtarget.hasVInstructions()) FirstMaskArgument = preAssignMask(Outs); for (unsigned i = 0; i != NumArgs; i++) { @@ -8974,7 +8988,7 @@ CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); Optional FirstMaskArgument; - if (Subtarget.hasStdExtV()) + if (Subtarget.hasVInstructions()) FirstMaskArgument = preAssignMask(Outs); for (unsigned i = 0, e = Outs.size(); i != e; ++i) { @@ -9407,7 +9421,7 @@ } } - if (Subtarget.hasStdExtV()) { + if (Subtarget.hasVInstructions()) { Register VReg = StringSwitch(Constraint.lower()) .Case("{v0}", RISCV::V0) .Case("{v1}", RISCV::V1) diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -1009,7 +1009,7 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) { // Skip if the vector 
extension is not enabled. const RISCVSubtarget &ST = MF.getSubtarget(); - if (!ST.hasStdExtV()) + if (!ST.hasVInstructions()) return false; TII = ST.getInstrInfo(); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -3458,7 +3458,7 @@ // Pseudo instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { //===----------------------------------------------------------------------===// // Pseudo Instructions for CodeGen @@ -3819,9 +3819,9 @@ defm PseudoVNCLIPU : VPseudoBinaryV_WV_WX_WI; } -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { //===----------------------------------------------------------------------===// // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// @@ -3954,9 +3954,9 @@ defm PseudoVFNCVT_F_X : VPseudoConversionV_W; defm PseudoVFNCVT_F_F : VPseudoConversionV_W; defm PseudoVFNCVT_ROD_F_F : VPseudoConversionV_W; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { //===----------------------------------------------------------------------===// // 15.1. 
Vector Single-Width Integer Reduction Instructions //===----------------------------------------------------------------------===// @@ -3974,9 +3974,9 @@ //===----------------------------------------------------------------------===// defm PseudoVWREDSUMU : VPseudoReductionV_VS; defm PseudoVWREDSUM : VPseudoReductionV_VS; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { //===----------------------------------------------------------------------===// // 15.3. Vector Single-Width Floating-Point Reduction Instructions //===----------------------------------------------------------------------===// @@ -3991,7 +3991,7 @@ defm PseudoVFWREDUSUM : VPseudoReductionV_VS; defm PseudoVFWREDOSUM : VPseudoReductionV_VS; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 16. Vector Mask Instructions @@ -4059,7 +4059,7 @@ // 17.1. Integer Scalar Move Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { foreach m = MxList.m in { let VLMul = m.value in { @@ -4076,13 +4076,13 @@ } } } -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] //===----------------------------------------------------------------------===// // 17.2. 
Floating-Point Scalar Move Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { foreach m = MxList.m in { foreach f = FPList.fpinfo in { @@ -4104,22 +4104,22 @@ } } } -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 17.3. Vector Slide Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI; defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI; defm PseudoVSLIDE1UP : VPseudoBinaryV_VX<"@earlyclobber $rd">; defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { defm PseudoVFSLIDE1UP : VPseudoBinaryV_VF<"@earlyclobber $rd">; defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VF; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 17.4. Vector Register Gather Instructions @@ -4151,15 +4151,15 @@ defm : VPatAMOV_WD<"int_riscv_vamomaxu", "PseudoVAMOMAXU", AllIntegerVectors>; } // Predicates = [HasStdExtZvamo] -let Predicates = [HasStdExtZvamo, HasStdExtF] in { +let Predicates = [HasStdExtZvamo, HasVInstructionsF32] in { defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllFloatVectors>; -} // Predicates = [HasStdExtZvamo, HasStdExtF] +} // Predicates = [HasStdExtZvamo, HasVInstructionsF32] //===----------------------------------------------------------------------===// // 12. 
Vector Integer Arithmetic Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { //===----------------------------------------------------------------------===// // 12.1. Vector Single-Width Integer Add and Subtract //===----------------------------------------------------------------------===// @@ -4475,9 +4475,9 @@ defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>; defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { //===----------------------------------------------------------------------===// // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// @@ -4623,9 +4623,9 @@ defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">; defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">; defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { //===----------------------------------------------------------------------===// // 15.1. 
Vector Single-Width Integer Reduction Instructions //===----------------------------------------------------------------------===// @@ -4643,9 +4643,9 @@ //===----------------------------------------------------------------------===// defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">; defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { //===----------------------------------------------------------------------===// // 15.3. Vector Single-Width Floating-Point Reduction Instructions //===----------------------------------------------------------------------===// @@ -4660,13 +4660,13 @@ defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>; defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 16. Vector Mask Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { //===----------------------------------------------------------------------===// // 16.1 Vector Mask-Register Logical Instructions //===----------------------------------------------------------------------===// @@ -4718,7 +4718,7 @@ //===----------------------------------------------------------------------===// defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] //===----------------------------------------------------------------------===// // 17. Vector Permutation Instructions @@ -4728,19 +4728,19 @@ // 17.1. 
Integer Scalar Move Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { foreach vti = AllIntegerVectors in { def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)), (!cast("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>; // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td } -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] //===----------------------------------------------------------------------===// // 17.2. Floating-Point Scalar Move Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { foreach fvti = AllFloatVectors in { defvar instr = !cast("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" # fvti.LMul.MX); @@ -4755,52 +4755,52 @@ (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl, fvti.Log2SEW)>; } -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 17.3. 
Vector Slide Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>; defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>; defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>; defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>; defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>; defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>; defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 17.4. 
Vector Register Gather Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", AllIntegerVectors, uimm5>; defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", /* eew */ 16, AllIntegerVectors>; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", AllFloatVectors, uimm5>; defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", /* eew */ 16, AllFloatVectors>; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] //===----------------------------------------------------------------------===// // 17.5. Vector Compress Instruction //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { +let Predicates = [HasVInstructions] in { defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>; -} // Predicates = [HasStdExtV] +} // Predicates = [HasVInstructions] -let Predicates = [HasStdExtV, HasStdExtF] in { +let Predicates = [HasVInstructionsF32] in { defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>; -} // Predicates = [HasStdExtV, HasStdExtF] +} // Predicates = [HasVInstructionsF32] // Include the non-intrinsic ISel patterns include "RISCVInstrInfoVSDPatterns.td" diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -131,8 +131,15 @@ assert(i < RISCV::NUM_TARGET_REGS && "Register out of range"); return UserReservedRegister[i]; } + + // Vector codegen related methods. 
+ bool hasVInstructions() const { return HasStdExtV; } + bool hasVInstructionsI64() const { return HasStdExtV; } + bool hasVInstructionsF16() const { return HasStdExtV && hasStdExtZfh(); } + bool hasVInstructionsF32() const { return HasStdExtV && hasStdExtF(); } + bool hasVInstructionsF64() const { return HasStdExtV && hasStdExtD(); } unsigned getMaxInterleaveFactor() const { - return hasStdExtV() ? MaxInterleaveFactor : 1; + return hasVInstructions() ? MaxInterleaveFactor : 1; } protected: diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp --- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp @@ -111,7 +111,8 @@ } unsigned RISCVSubtarget::getMaxRVVVectorSizeInBits() const { - assert(hasStdExtV() && "Tried to get vector length without V support!"); + assert(hasVInstructions() && + "Tried to get vector length without Zve or V extension support!"); if (RVVVectorBitsMax == 0) return 0; assert(RVVVectorBitsMax >= 128 && RVVVectorBitsMax <= 65536 && @@ -126,8 +127,8 @@ } unsigned RISCVSubtarget::getMinRVVVectorSizeInBits() const { - assert(hasStdExtV() && - "Tried to get vector length without V extension support!"); + assert(hasVInstructions() && + "Tried to get vector length without Zve or V extension support!"); assert((RVVVectorBitsMin == 0 || (RVVVectorBitsMin >= 128 && RVVVectorBitsMax <= 65536 && isPowerOf2_32(RVVVectorBitsMin))) && @@ -143,8 +144,8 @@ } unsigned RISCVSubtarget::getMaxLMULForFixedLengthVectors() const { - assert(hasStdExtV() && - "Tried to get maximum LMUL without V extension support!"); + assert(hasVInstructions() && + "Tried to get maximum LMUL without Zve or V extension support!"); assert(RVVVectorLMULMax <= 8 && isPowerOf2_32(RVVVectorLMULMax) && "V extension requires a LMUL to be at most 8 and a power of 2!"); return PowerOf2Floor( @@ -152,8 +153,8 @@ } unsigned RISCVSubtarget::getMaxELENForFixedLengthVectors() const { - assert(hasStdExtV() && - "Tried to 
get maximum ELEN without V extension support!"); + assert(hasVInstructions() && + "Tried to get maximum ELEN without Zve or V extension support!"); assert(RVVVectorELENMax <= 64 && RVVVectorELENMax >= 8 && isPowerOf2_32(RVVVectorELENMax) && "V extension requires a ELEN to be a power of 2 between 8 and 64!"); @@ -162,5 +163,5 @@ } bool RISCVSubtarget::useRVVForFixedLengthVectors() const { - return hasStdExtV() && getMinRVVVectorSizeInBits() != 0; + return hasVInstructions() && getMinRVVVectorSizeInBits() != 0; } diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -55,7 +55,7 @@ TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth); bool shouldExpandReduction(const IntrinsicInst *II) const; - bool supportsScalableVectors() const { return ST->hasStdExtV(); } + bool supportsScalableVectors() const { return ST->hasVInstructions(); } Optional getMaxVScale() const; TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { @@ -64,17 +64,17 @@ return TypeSize::getFixed(ST->getXLen()); case TargetTransformInfo::RGK_FixedWidthVector: return TypeSize::getFixed( - ST->hasStdExtV() ? ST->getMinRVVVectorSizeInBits() : 0); + ST->hasVInstructions() ? ST->getMinRVVVectorSizeInBits() : 0); case TargetTransformInfo::RGK_ScalableVector: return TypeSize::getScalable( - ST->hasStdExtV() ? RISCV::RVVBitsPerBlock : 0); + ST->hasVInstructions() ? RISCV::RVVBitsPerBlock : 0); } llvm_unreachable("Unsupported register kind"); } unsigned getMinVectorRegisterBitWidth() const { - return ST->hasStdExtV() ? ST->getMinRVVVectorSizeInBits() : 0; + return ST->hasVInstructions() ? 
ST->getMinRVVVectorSizeInBits() : 0; } InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, @@ -84,7 +84,7 @@ const Instruction *I); bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) { - if (!ST->hasStdExtV()) + if (!ST->hasVInstructions()) return false; // Only support fixed vectors if we know the minimum vector size. @@ -112,7 +112,7 @@ } bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) { - if (!ST->hasStdExtV()) + if (!ST->hasVInstructions()) return false; // Only support fixed vectors if we know the minimum vector size. @@ -149,7 +149,7 @@ bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const { - if (!ST->hasStdExtV()) + if (!ST->hasVInstructions()) return false; if (!VF.isScalable()) diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -132,7 +132,7 @@ // know whether the LoopVectorizer is safe to do or not. // We only consider to use single vector register (LMUL = 1) to vectorize. unsigned MaxVectorSizeInBits = ST->getMaxRVVVectorSizeInBits(); - if (ST->hasStdExtV() && MaxVectorSizeInBits != 0) + if (ST->hasVInstructions() && MaxVectorSizeInBits != 0) return MaxVectorSizeInBits / RISCV::RVVBitsPerBlock; return BaseT::getMaxVScale(); }