diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1885,7 +1885,6 @@
 
 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
-      N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
      N.getOpcode() != RISCVISD::VMV_V_X_VL)
     return false;
   SplatVal = N.getOperand(0);
@@ -1899,18 +1898,17 @@
                                    const RISCVSubtarget &Subtarget,
                                    ValidateFn ValidateImm) {
   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
-       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
       N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
      !isa<ConstantSDNode>(N.getOperand(0)))
    return false;
 
  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
 
-  // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
-  // share semantics when the operand type is wider than the resulting vector
-  // element type: an implicit truncation first takes place. Therefore, perform
-  // a manual truncation/sign-extension in order to ignore any truncated bits
-  // and catch any zero-extended immediate.
+  // ISD::SPLAT_VECTOR and RISCVISD::VMV_V_X_VL share semantics when the
+  // operand type is wider than the resulting vector element type: an
+  // implicit truncation first takes place. Therefore, perform a manual
+  // truncation/sign-extension in order to ignore any truncated bits and
+  // catch any zero-extended immediate.
   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
   // sign-extending to (XLenVT -1).
   MVT XLenVT = Subtarget.getXLenVT();
@@ -1948,7 +1946,6 @@
 
 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
-       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
       N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
      !isa<ConstantSDNode>(N.getOperand(0)))
    return false;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -141,9 +141,6 @@
   VMV_S_X_VL,
   // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
   VFMV_S_F_VL,
-  // Splats an i64 scalar to a vector type (with element type i64) where the
-  // scalar is a sign-extended i32.
-  SPLAT_VECTOR_I64,
   // Splats an 64-bit value that has been split into two i32 parts. This is
   // expanded late to two scalar stores and a stride 0 vector load.
   SPLAT_VECTOR_SPLIT_I64_VL,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4025,12 +4025,16 @@ // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<64.
     if ((LoC >> 31) == HiC)
-      return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
+      return DAG.getNode(
+          RISCVISD::VMV_V_X_VL, DL, VecVT, Lo,
+          DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
   }
 
   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
       isa<ConstantSDNode>(Hi.getOperand(1)) &&
       Hi.getConstantOperandVal(1) == 31)
-    return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
+    return DAG.getNode(
+        RISCVISD::VMV_V_X_VL, DL, VecVT, Lo,
+        DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
 
   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
@@ -4089,7 +4093,7 @@
   // Be careful not to introduce illegal scalar types at this stage, and be
   // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
   // illegal and must be expanded. Since we know that the constants are
-  // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
+  // sign-extended 32-bit values, we use VMV_V_X_VL directly.
   bool IsRV32E64 =
       !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
 
@@ -4097,9 +4101,12 @@
     SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
     SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
   } else {
-    SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
+    SplatZero =
+        DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatZero,
+                    DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
     SplatTrueVal =
-        DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
+        DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatTrueVal,
+                    DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
   }
 
   return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
@@ -5416,7 +5423,9 @@
   if (!IsRV32E64)
     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
   else
-    SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
+    SplatVL =
+        DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, VLMinus1,
+                    DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
 
   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
   SDValue Indices =
@@ -8134,8 +8143,9 @@
       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
       SDLoc DL(N);
       EVT VT = N->getValueType(0);
-      ShAmt =
-          DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
+      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0),
+                          DAG.getTargetConstant(RISCV::VLMaxSentinel, DL,
+                                                Subtarget.getXLenVT()));
       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
     }
     break;
@@ -10290,7 +10300,6 @@
   NODE_NAME_CASE(VMV_X_S)
   NODE_NAME_CASE(VMV_S_X_VL)
   NODE_NAME_CASE(VFMV_S_F_VL)
-  NODE_NAME_CASE(SPLAT_VECTOR_I64)
   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
   NODE_NAME_CASE(READ_VLENB)
   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5097,5 +5097,5 @@
 } // Predicates = [HasVInstructionsAnyF]
 
 // Include the non-intrinsic ISel patterns
-include "RISCVInstrInfoVSDPatterns.td"
 include "RISCVInstrInfoVVLPatterns.td"
+include "RISCVInstrInfoVSDPatterns.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -22,31 +22,9 @@
 // Helpers to define the SDNode patterns.
 //===----------------------------------------------------------------------===//
 
-def SDTSplatI64 : SDTypeProfile<1, 1, [
-  SDTCVecEltisVT<0, i64>, SDTCisVT<1, i32>
-]>;
-
-def rv32_splat_i64 : SDNode<"RISCVISD::SPLAT_VECTOR_I64", SDTSplatI64>;
-
-def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
-                                                SDTCisVT<1, XLenVT>]>;
-def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
-def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;
-
 def rvv_vnot : PatFrag<(ops node:$in),
                        (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;
 
-// Give explicit Complexity to prefer simm5/uimm5.
-def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
-def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 2>;
-def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], [], 2>;
-def SplatPat_simm5_plus1
-    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 2>;
-def SplatPat_simm5_plus1_nonzero
-    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 2>;
-
 class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
   dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
 }
@@ -526,7 +504,7 @@
 }
 
 foreach vti = [VI64M1, VI64M2, VI64M4, VI64M8] in {
   def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
-                 (vti.Vector (rv32_splat_i64 (XLenVT 1)))),
+                 (vti.Vector (riscv_vmv_v_x_vl 1, (XLenVT srcvalue)))),
             (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                  vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -185,6 +185,11 @@
 def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>;
 def riscv_vp_merge_vl : SDNode<"RISCVISD::VP_MERGE_VL", SDT_RISCVSelect_VL>;
 
+def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
+                                                SDTCisVT<1, XLenVT>]>;
+def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
+def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;
+
 def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                  SDTCisSameAs<0, 2>,
                                                  SDTCVecEltisVT<0, i1>,
@@ -273,6 +278,17 @@
                 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
   def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
 
+// Give explicit Complexity to prefer simm5/uimm5.
+def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
+def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 2>;
+def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], [], 2>;
+def SplatPat_simm5_plus1
+    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 2>;
+def SplatPat_simm5_plus1_nonzero
+    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 2>;
+
 // Ignore the vl operand.
 def SplatFPOp : PatFrag<(ops node:$op),
                         (riscv_vfmv_v_f_vl node:$op, srcvalue)>;