diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -311,10 +311,6 @@
   STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
   STRICT_FCVT_WU_RV64,
 
-  // Memory opcodes start here.
-  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
-  VSE_VL,
-
   // WARNING: Do not add anything in the end unless you want the node to
   // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
   // opcodes will be thought as target memory ops!
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5664,15 +5664,23 @@
          "Expecting a correctly-aligned load");
 
   MVT VT = Op.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
   MVT ContainerVT = getContainerForFixedLengthVector(VT);
 
-  SDValue VL =
-      DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
+  SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
+  bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
+  SDValue IntID = DAG.getTargetConstant(
+      IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
+  SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
+  if (!IsMaskOp)
+    Ops.push_back(DAG.getUNDEF(ContainerVT));
+  Ops.push_back(Load->getBasePtr());
+  Ops.push_back(VL);
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
-  SDValue NewLoad = DAG.getMemIntrinsicNode(
-      RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
-      Load->getMemoryVT(), Load->getMemOperand());
+  SDValue NewLoad =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                              Load->getMemoryVT(), Load->getMemOperand());
 
   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
   return DAG.getMergeValues({Result, Load->getChain()}, DL);
@@ -5691,6 +5699,7 @@
 
   SDValue StoreVal = Store->getValue();
   MVT VT = StoreVal.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
 
   // If the size less than a byte, we need to pad with zeros to make a byte.
   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
@@ -5702,14 +5711,17 @@
 
   MVT ContainerVT = getContainerForFixedLengthVector(VT);
 
-  SDValue VL =
-      DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
+  SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
 
   SDValue NewValue =
       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
+
+  bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
+  SDValue IntID = DAG.getTargetConstant(
+      IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
   return DAG.getMemIntrinsicNode(
-      RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
-      {Store->getChain(), NewValue, Store->getBasePtr(), VL},
+      ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
+      {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
       Store->getMemoryVT(), Store->getMemOperand());
 }
 
@@ -10826,8 +10838,6 @@
   NODE_NAME_CASE(VSEXT_VL)
   NODE_NAME_CASE(VZEXT_VL)
   NODE_NAME_CASE(VCPOP_VL)
-  NODE_NAME_CASE(VLE_VL)
-  NODE_NAME_CASE(VSE_VL)
   NODE_NAME_CASE(READ_CSR)
   NODE_NAME_CASE(WRITE_CSR)
   NODE_NAME_CASE(SWAP_CSR)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -21,11 +21,6 @@
 // Helpers to define the VL patterns.
 //===----------------------------------------------------------------------===//
 
-def SDT_RISCVVLE_VL : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
-                                           SDTCisVT<2, XLenVT>]>;
-def SDT_RISCVVSE_VL : SDTypeProfile<0, 3, [SDTCisVec<0>, SDTCisPtrTy<1>,
-                                           SDTCisVT<2, XLenVT>]>;
-
 def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameAs<0, 2>,
                                                 SDTCisVec<0>, SDTCisInt<0>,
@@ -66,11 +61,6 @@
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;
 
-def riscv_vle_vl : SDNode<"RISCVISD::VLE_VL", SDT_RISCVVLE_VL,
-                          [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-def riscv_vse_vl : SDNode<"RISCVISD::VSE_VL", SDT_RISCVVSE_VL,
-                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
-
 def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>;
 def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
@@ -745,29 +735,6 @@
 
 let Predicates = [HasVInstructions] in {
 
-// 7.4. Vector Unit-Stride Instructions
-foreach vti = AllVectors in {
-  defvar load_instr = !cast<Instruction>("PseudoVLE"#vti.SEW#"_V_"#vti.LMul.MX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
-  // Load
-  def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
-            (load_instr BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
-  // Store
-  def : Pat<(riscv_vse_vl (vti.Vector vti.RegClass:$rs2), BaseAddr:$rs1,
-                          VLOpFrag),
-            (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
-}
-
-foreach mti = AllMasks in {
-  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#mti.BX);
-  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#mti.BX);
-  def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
-            (load_instr BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
-  def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
-                          VLOpFrag),
-            (store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
-}
-
 // 12.1. Vector Single-Width Integer Add and Subtract
 defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
 defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
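
Note (not part of the patch): a sketch of the operand-layout change, distilled
from the hunks above. The node shapes are written informally; all names come
from the patch itself.

  // Before: dedicated target memory nodes, matched by the hand-written
  // unit-stride patterns deleted from RISCVInstrInfoVVLPatterns.td:
  //   load:  (VLE_VL chain, baseptr, vl)
  //   store: (VSE_VL chain, value, baseptr, vl)
  //
  // After: generic memory-intrinsic nodes carrying an intrinsic ID as their
  // second operand, so the pre-existing riscv_vle/riscv_vlm/riscv_vse/
  // riscv_vsm intrinsic selection patterns match them and both the custom
  // nodes and their patterns can be removed. Non-mask loads gain an undef
  // passthru operand, matching the intrinsic signature:
  //   load (i1 mask): (INTRINSIC_W_CHAIN chain, riscv_vlm, baseptr, vl)
  //   load (other):   (INTRINSIC_W_CHAIN chain, riscv_vle, undef, baseptr, vl)
  //   store:          (INTRINSIC_VOID chain, riscv_vsm/riscv_vse, value,
  //                    baseptr, vl)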