diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -100,6 +100,9 @@
   // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
   // extended from the vector element size.
   VMV_X_S,
+  // VMV_S_XF_VL matches the semantics of vmv.s.x/vfmv.s.f, depending on the
+  // types of its operands. It carries a VL operand.
+  VMV_S_XF_VL,
   // Splats an i64 scalar to a vector type (with element type i64) where the
   // scalar is a sign-extended i32.
   SPLAT_VECTOR_I64,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2210,14 +2210,12 @@
   //   (slideup vec, (insertelt (slidedown impdef, vec, idx), val, 0), idx),
   if (Subtarget.is64Bit() || Val.getValueType() != MVT::i64) {
     if (isNullConstant(Idx))
-      return Op;
+      return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL);
     SDValue Slidedown = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
                                     DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
     SDValue InsertElt0 =
-        DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Slidedown, Val,
-                    DAG.getConstant(0, DL, Subtarget.getXLenVT()));
-
+        DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Slidedown, Val, VL);
     return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, InsertElt0,
                        Idx, Mask, VL);
   }
@@ -5735,6 +5733,7 @@
   NODE_NAME_CASE(VMV_V_X_VL)
   NODE_NAME_CASE(VFMV_V_F_VL)
   NODE_NAME_CASE(VMV_X_S)
+  NODE_NAME_CASE(VMV_S_XF_VL)
   NODE_NAME_CASE(SPLAT_VECTOR_I64)
   NODE_NAME_CASE(READ_VLENB)
   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -754,47 +754,16 @@
 } // Predicates = [HasStdExtV, HasStdExtF]

 //===----------------------------------------------------------------------===//
-// Vector Element Inserts/Extracts
+// Vector Element Extracts
 //===----------------------------------------------------------------------===//
-
-// The built-in TableGen 'insertelt' node must return the same type as the
-// vector element type. On RISC-V, XLenVT is the only legal integer type, so
-// for integer inserts we use a custom node which inserts an XLenVT-typed
-// value.
-def riscv_insert_vector_elt
-    : SDNode<"ISD::INSERT_VECTOR_ELT",
-             SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisVT<2, XLenVT>,
-                                  SDTCisPtrTy<3>]>, []>;
-
-let Predicates = [HasStdExtV] in
-foreach vti = AllIntegerVectors in {
-  def : Pat<(vti.Vector (riscv_insert_vector_elt (vti.Vector vti.RegClass:$merge),
-                                                 vti.ScalarRegClass:$rs1, 0)),
-            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
-                vti.RegClass:$merge,
-                (vti.Scalar vti.ScalarRegClass:$rs1),
-                vti.AVL, vti.SEW)>;
-}
-
 let Predicates = [HasStdExtV, HasStdExtF] in
 foreach vti = AllFloatVectors in {
-  defvar MX = vti.LMul.MX;
   defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                       vti.ScalarSuffix,
-                                                      "_S_", MX));
-  defvar vmv_s_f_inst = !cast<Instruction>(!strconcat("PseudoVFMV_S_",
-                                                      vti.ScalarSuffix,
-                                                      "_", vti.LMul.MX));
-  // Only pattern-match insert/extract-element operations where the index is
-  // 0. Any other index will have been custom-lowered to slide the vector
-  // correctly into place (and, in the case of insert, slide it back again
-  // afterwards).
+                                                      "_S_", vti.LMul.MX));
+  // Only pattern-match extract-element operations where the index is 0. Any
+  // other index will have been custom-lowered to slide the vector correctly
+  // into place.
   def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
             (vmv_f_s_inst vti.RegClass:$rs2, vti.SEW)>;
-
-  def : Pat<(vti.Vector (insertelt (vti.Vector vti.RegClass:$merge),
-                                   vti.ScalarRegClass:$rs1, 0)),
-            (vmv_s_f_inst vti.RegClass:$merge,
-                          (vti.Scalar vti.ScalarRegClass:$rs1),
-                          vti.AVL, vti.SEW)>;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -54,6 +54,9 @@
                                SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisFP<0>,
                                                     SDTCisEltOfVec<1, 0>,
                                                     SDTCisVT<2, XLenVT>]>>;
+def riscv_vmv_s_xf_vl : SDNode<"RISCVISD::VMV_S_XF_VL",
+                               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
+                                                    SDTCisVT<3, XLenVT>]>>;
 def riscv_vle_vl : SDNode<"RISCVISD::VLE_VL", SDT_RISCVVLE_VL,
                           [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
@@ -941,10 +944,16 @@
 } // Predicates = [HasStdExtV]

-// 17.4. Vector Register GAther Instruction
 let Predicates = [HasStdExtV] in {
-
+// 17.1. Integer Scalar Move Instructions
+// 17.4. Vector Register Gather Instruction
 foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Vector (riscv_vmv_s_xf_vl (vti.Vector vti.RegClass:$merge),
+                                            (XLenVT vti.ScalarRegClass:$rs1),
+                                            (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
+                vti.RegClass:$merge,
+                (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                               (vti.Mask true_mask),
                                               (XLenVT (VLOp GPR:$vl)))),
@@ -961,7 +970,14 @@

 let Predicates = [HasStdExtV, HasStdExtF] in {

+// 17.2. Floating-Point Scalar Move Instructions
 foreach vti = AllFloatVectors in {
+  def : Pat<(vti.Vector (riscv_vmv_s_xf_vl (vti.Vector vti.RegClass:$merge),
+                                            vti.ScalarRegClass:$rs1,
+                                            (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
+                vti.RegClass:$merge,
+                (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
   def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                               (vti.Mask true_mask),
                                               (XLenVT (VLOp GPR:$vl)))),
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -30,7 +30,6 @@
 ; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; RV64-NEXT:    vle64.v v26, (a0)
 ; RV64-NEXT:    vslidedown.vi v28, v26, 3
-; RV64-NEXT:    vsetvli a2, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.s.x v28, a1
 ; RV64-NEXT:    vsetivli a1, 4, e64,m2,tu,mu
 ; RV64-NEXT:    vslideup.vi v26, v28, 3
@@ -101,7 +100,6 @@
 ; RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; RV32-NEXT:    vle8.v v25, (a0)
 ; RV32-NEXT:    vslidedown.vi v26, v25, 14
-; RV32-NEXT:    vsetvli a2, zero, e8,m1,ta,mu
 ; RV32-NEXT:    vmv.s.x v26, a1
 ; RV32-NEXT:    vsetivli a1, 16, e8,m1,tu,mu
 ; RV32-NEXT:    vslideup.vi v25, v26, 14
@@ -114,7 +112,6 @@
 ; RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; RV64-NEXT:    vle8.v v25, (a0)
 ; RV64-NEXT:    vslidedown.vi v26, v25, 14
-; RV64-NEXT:    vsetvli a2, zero, e8,m1,ta,mu
 ; RV64-NEXT:    vmv.s.x v26, a1
 ; RV64-NEXT:    vsetivli a1, 16, e8,m1,tu,mu
 ; RV64-NEXT:    vslideup.vi v25, v26, 14
@@ -134,7 +131,6 @@
 ; RV32-NEXT:    vsetvli a4, a3, e16,m4,ta,mu
 ; RV32-NEXT:    vle16.v v28, (a0)
 ; RV32-NEXT:    vslidedown.vx v8, v28, a2
-; RV32-NEXT:    vsetvli a4, zero, e16,m4,ta,mu
 ; RV32-NEXT:    vmv.s.x v8, a1
 ; RV32-NEXT:    vsetvli a1, a3, e16,m4,tu,mu
 ; RV32-NEXT:    vslideup.vx v28, v8, a2
@@ -149,7 +145,6 @@
 ; RV64-NEXT:    vle16.v v28, (a0)
 ; RV64-NEXT:    sext.w a2, a2
 ; RV64-NEXT:    vslidedown.vx v8, v28, a2
-; RV64-NEXT:    vsetvli a4, zero, e16,m4,ta,mu
 ; RV64-NEXT:    vmv.s.x v8, a1
 ; RV64-NEXT:    vsetvli a1, a3, e16,m4,tu,mu
 ; RV64-NEXT:    vslideup.vx v28, v8, a2
@@ -168,7 +163,6 @@
 ; RV32-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; RV32-NEXT:    vle32.v v26, (a0)
 ; RV32-NEXT:    vslidedown.vx v28, v26, a1
-; RV32-NEXT:    vsetvli a2, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vfmv.s.f v28, fa0
 ; RV32-NEXT:    vsetivli a2, 8, e32,m2,tu,mu
 ; RV32-NEXT:    vslideup.vx v26, v28, a1
@@ -182,7 +176,6 @@
 ; RV64-NEXT:    vle32.v v26, (a0)
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    vslidedown.vx v28, v26, a1
-; RV64-NEXT:    vsetvli a2, zero, e32,m2,ta,mu
 ; RV64-NEXT:    vfmv.s.f v28, fa0
 ; RV64-NEXT:    vsetivli a2, 8, e32,m2,tu,mu
 ; RV64-NEXT:    vslideup.vx v26, v28, a1
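
For context, here is a sketch of the kind of function the first test hunk above is checking, reconstructed from its RV64 check lines; the function name and exact IR are assumptions and are not copied from fixed-vectors-insert.ll. The slide-down/slide-up sequence for a non-zero insert index is unchanged. What changes is that the inner index-0 insert is now emitted as RISCVISD::VMV_S_XF_VL carrying the surrounding operation's VL, so it selects straight to vmv.s.x/vfmv.s.f and no longer needs the standalone "vsetvli aN, zero, ..." that the generic INSERT_VECTOR_ELT patterns (with vti.AVL) produced, which is exactly the line removed from each check block.

; Sketch only: reconstructed from the check lines in the first hunk above;
; the name and body are assumptions, not the actual test contents.
define void @insertelt_v4i64_sketch(<4 x i64>* %x, i64 %y) {
  %a = load <4 x i64>, <4 x i64>* %x
  ; Inserting at index 3 lowers to: slide element 3 down to position 0,
  ; vmv.s.x the scalar into element 0, then slide the result back up by 3.
  %b = insertelement <4 x i64> %a, i64 %y, i32 3
  store <4 x i64> %b, <4 x i64>* %x
  ret void
}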