diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -637,6 +637,15 @@ continue; } + // Use SPLAT_VECTOR to prevent type legalization from destroying the + // splats when type legalizing i64 scalar on RV32. + // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs + // improvements first. + if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) { + setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); + setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); + } + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); @@ -1205,6 +1214,24 @@ return false; } +static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + MVT VT = Op.getSimpleValueType(); + assert(VT.isFixedLengthVector() && "Unexpected vector!"); + + MVT ContainerVT = + RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget); + + SDLoc DL(Op); + SDValue Mask, VL; + std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); + + unsigned Opc = + VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; + SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL); + return convertFromScalableVector(VT, Splat, DAG, Subtarget); +} + static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { MVT VT = Op.getSimpleValueType(); @@ -1453,16 +1480,16 @@ return SDValue(); } -// Called by type legalization to handle splat of i64 on RV32. -// FIXME: We can optimize this when the type has sign or zero bits in one -// of the halves. -static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, - SDValue VL, SelectionDAG &DAG) { - SDValue ThirtyTwoV = DAG.getConstant(32, DL, VT); - SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, - DAG.getConstant(0, DL, MVT::i32)); - SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, - DAG.getConstant(1, DL, MVT::i32)); +static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo, + SDValue Hi, SDValue VL, SelectionDAG &DAG) { + if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { + int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); + int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); + // If Hi constant is all the same sign bit as Lo, lower this as a custom + // node in order to try and match RVV vector/scalar instructions. + if ((LoC >> 31) == HiC) + return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); + } // vmv.v.x vX, hi // vsll.vx vX, vX, /*32*/ @@ -1470,6 +1497,7 @@ // vsll.vx vY, vY, /*32*/ // vsrl.vx vY, vY, /*32*/ // vor.vv vX, vX, vY + SDValue ThirtyTwoV = DAG.getConstant(32, DL, VT); MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); Lo = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); @@ -1482,6 +1510,19 @@ return DAG.getNode(RISCVISD::OR_VL, DL, VT, Lo, Hi, Mask, VL); } +// Called by type legalization to handle splat of i64 on RV32. +// FIXME: We can optimize this when the type has sign or zero bits in one +// of the halves.
+static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, + SDValue VL, SelectionDAG &DAG) { + assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!"); + SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, + DAG.getConstant(0, DL, MVT::i32)); + SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, + DAG.getConstant(1, DL, MVT::i32)); + return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG); +} + // This function lowers a splat of a scalar operand Splat with the vector // length VL. It ensures the final sequence is type legal, which is useful when // lowering a splat after type legalization. @@ -1508,16 +1549,6 @@ assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 && "Unexpected scalar for splat lowering!"); - // If this is a sign-extended 32-bit constant, we can truncate it and rely - // on the instruction to sign-extend since SEW>XLEN. - if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) { - if (isInt<32>(CVal->getSExtValue())) - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, - DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32), - VL); - } - - // Otherwise use the more complicated splatting algorithm. return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG); } @@ -2069,6 +2100,8 @@ return lowerVECTOR_REVERSE(Op, DAG); case ISD::BUILD_VECTOR: return lowerBUILD_VECTOR(Op, DAG, Subtarget); + case ISD::SPLAT_VECTOR: + return lowerSPLAT_VECTOR(Op, DAG, Subtarget); case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); case ISD::CONCAT_VECTORS: { @@ -2625,7 +2658,7 @@ SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - EVT VecVT = Op.getValueType(); + MVT VecVT = Op.getSimpleValueType(); assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 && "Unexpected SPLAT_VECTOR_PARTS lowering"); @@ -2633,6 +2666,17 @@ SDValue Lo = Op.getOperand(0); SDValue Hi = Op.getOperand(1); + if (VecVT.isFixedLengthVector()) { + MVT ContainerVT = getContainerForFixedLengthVector(VecVT); + SDLoc DL(Op); + SDValue Mask, VL; + std::tie(Mask, VL) = + getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); + + SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG); + return convertFromScalableVector(VecVT, Res, DAG, Subtarget); + } + if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -274,9 +274,8 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI12_0) ; RV32-NEXT: vsetivli a1, 8, e16,m1,ta,mu ; RV32-NEXT: vle16.v v25, (a0) -; RV32-NEXT: vsetivli a0, 16, e32,m4,ta,mu -; RV32-NEXT: vmv.v.i v12, -1 ; RV32-NEXT: vsetivli a0, 8, e64,m4,ta,mu +; RV32-NEXT: vmv.v.i v12, -1 ; RV32-NEXT: vrgatherei16.vv v28, v12, v25 ; RV32-NEXT: vsetivli a0, 8, e64,m4,tu,mu ; RV32-NEXT: vrgatherei16.vv v28, v8, v27, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -48,35 +48,41 @@ define void @splat_v2i64(<2 x i64>* %x, i64 %y) { ; LMULMAX8-RV32-LABEL: splat_v2i64: ; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: addi a3, zero, 5 -; LMULMAX8-RV32-NEXT: vsetivli a4, 1, e8,m1,ta,mu -;
LMULMAX8-RV32-NEXT: vmv.s.x v0, a3 -; LMULMAX8-RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; LMULMAX8-RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu ; LMULMAX8-RV32-NEXT: vmv.v.x v25, a2 -; LMULMAX8-RV32-NEXT: vmerge.vxm v25, v25, a1, v0 -; LMULMAX8-RV32-NEXT: vse32.v v25, (a0) +; LMULMAX8-RV32-NEXT: addi a2, zero, 32 +; LMULMAX8-RV32-NEXT: vsll.vx v25, v25, a2 +; LMULMAX8-RV32-NEXT: vmv.v.x v26, a1 +; LMULMAX8-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX8-RV32-NEXT: vsrl.vx v26, v26, a2 +; LMULMAX8-RV32-NEXT: vor.vv v25, v26, v25 +; LMULMAX8-RV32-NEXT: vse64.v v25, (a0) ; LMULMAX8-RV32-NEXT: ret ; ; LMULMAX2-RV32-LABEL: splat_v2i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: addi a3, zero, 5 -; LMULMAX2-RV32-NEXT: vsetivli a4, 1, e8,m1,ta,mu -; LMULMAX2-RV32-NEXT: vmv.s.x v0, a3 -; LMULMAX2-RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; LMULMAX2-RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu ; LMULMAX2-RV32-NEXT: vmv.v.x v25, a2 -; LMULMAX2-RV32-NEXT: vmerge.vxm v25, v25, a1, v0 -; LMULMAX2-RV32-NEXT: vse32.v v25, (a0) +; LMULMAX2-RV32-NEXT: addi a2, zero, 32 +; LMULMAX2-RV32-NEXT: vsll.vx v25, v25, a2 +; LMULMAX2-RV32-NEXT: vmv.v.x v26, a1 +; LMULMAX2-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX2-RV32-NEXT: vsrl.vx v26, v26, a2 +; LMULMAX2-RV32-NEXT: vor.vv v25, v26, v25 +; LMULMAX2-RV32-NEXT: vse64.v v25, (a0) ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v2i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: addi a3, zero, 5 -; LMULMAX1-RV32-NEXT: vsetivli a4, 1, e8,m1,ta,mu -; LMULMAX1-RV32-NEXT: vmv.s.x v0, a3 -; LMULMAX1-RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; LMULMAX1-RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu ; LMULMAX1-RV32-NEXT: vmv.v.x v25, a2 -; LMULMAX1-RV32-NEXT: vmerge.vxm v25, v25, a1, v0 -; LMULMAX1-RV32-NEXT: vse32.v v25, (a0) +; LMULMAX1-RV32-NEXT: addi a2, zero, 32 +; LMULMAX1-RV32-NEXT: vsll.vx v25, v25, a2 +; LMULMAX1-RV32-NEXT: vmv.v.x v26, a1 +; LMULMAX1-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX1-RV32-NEXT: vsrl.vx v26, v26, a2 +; LMULMAX1-RV32-NEXT: vor.vv v25, v26, v25 +; LMULMAX1-RV32-NEXT: vse64.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX8-RV64-LABEL: splat_v2i64: @@ -197,24 +203,28 @@ define void @splat_v4i64(<4 x i64>* %x, i64 %y) { ; LMULMAX8-RV32-LABEL: splat_v4i64: ; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: addi a3, zero, 85 -; LMULMAX8-RV32-NEXT: vsetivli a4, 1, e8,m1,ta,mu -; LMULMAX8-RV32-NEXT: vmv.s.x v0, a3 -; LMULMAX8-RV32-NEXT: vsetivli a3, 8, e32,m2,ta,mu +; LMULMAX8-RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu ; LMULMAX8-RV32-NEXT: vmv.v.x v26, a2 -; LMULMAX8-RV32-NEXT: vmerge.vxm v26, v26, a1, v0 -; LMULMAX8-RV32-NEXT: vse32.v v26, (a0) +; LMULMAX8-RV32-NEXT: addi a2, zero, 32 +; LMULMAX8-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX8-RV32-NEXT: vmv.v.x v28, a1 +; LMULMAX8-RV32-NEXT: vsll.vx v28, v28, a2 +; LMULMAX8-RV32-NEXT: vsrl.vx v28, v28, a2 +; LMULMAX8-RV32-NEXT: vor.vv v26, v28, v26 +; LMULMAX8-RV32-NEXT: vse64.v v26, (a0) ; LMULMAX8-RV32-NEXT: ret ; ; LMULMAX2-RV32-LABEL: splat_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: addi a3, zero, 85 -; LMULMAX2-RV32-NEXT: vsetivli a4, 1, e8,m1,ta,mu -; LMULMAX2-RV32-NEXT: vmv.s.x v0, a3 -; LMULMAX2-RV32-NEXT: vsetivli a3, 8, e32,m2,ta,mu +; LMULMAX2-RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu ; LMULMAX2-RV32-NEXT: vmv.v.x v26, a2 -; LMULMAX2-RV32-NEXT: vmerge.vxm v26, v26, a1, v0 -; LMULMAX2-RV32-NEXT: vse32.v v26, (a0) +; LMULMAX2-RV32-NEXT: addi a2, zero, 32 +; LMULMAX2-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX2-RV32-NEXT: vmv.v.x v28, a1 +; LMULMAX2-RV32-NEXT: vsll.vx v28, v28, a2 +; 
LMULMAX2-RV32-NEXT: vsrl.vx v28, v28, a2 +; LMULMAX2-RV32-NEXT: vor.vv v26, v28, v26 +; LMULMAX2-RV32-NEXT: vse64.v v26, (a0) ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v4i64: @@ -298,47 +308,12 @@ } define void @splat_zero_v2i64(<2 x i64>* %x) { -; LMULMAX8-RV32-LABEL: splat_zero_v2i64: -; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; LMULMAX8-RV32-NEXT: vmv.v.i v25, 0 -; LMULMAX8-RV32-NEXT: vse32.v v25, (a0) -; LMULMAX8-RV32-NEXT: ret -; -; LMULMAX2-RV32-LABEL: splat_zero_v2i64: -; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; LMULMAX2-RV32-NEXT: vmv.v.i v25, 0 -; LMULMAX2-RV32-NEXT: vse32.v v25, (a0) -; LMULMAX2-RV32-NEXT: ret -; -; LMULMAX1-RV32-LABEL: splat_zero_v2i64: -; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; LMULMAX1-RV32-NEXT: vmv.v.i v25, 0 -; LMULMAX1-RV32-NEXT: vse32.v v25, (a0) -; LMULMAX1-RV32-NEXT: ret -; -; LMULMAX8-RV64-LABEL: splat_zero_v2i64: -; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; LMULMAX8-RV64-NEXT: vmv.v.i v25, 0 -; LMULMAX8-RV64-NEXT: vse64.v v25, (a0) -; LMULMAX8-RV64-NEXT: ret -; -; LMULMAX2-RV64-LABEL: splat_zero_v2i64: -; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; LMULMAX2-RV64-NEXT: vmv.v.i v25, 0 -; LMULMAX2-RV64-NEXT: vse64.v v25, (a0) -; LMULMAX2-RV64-NEXT: ret -; -; LMULMAX1-RV64-LABEL: splat_zero_v2i64: -; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; LMULMAX1-RV64-NEXT: vmv.v.i v25, 0 -; LMULMAX1-RV64-NEXT: vse64.v v25, (a0) -; LMULMAX1-RV64-NEXT: ret +; CHECK-LABEL: splat_zero_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 0 +; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: ret %a = insertelement <2 x i64> undef, i64 0, i32 0 %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer store <2 x i64> %b, <2 x i64>* %x @@ -435,19 +410,19 @@ } define void @splat_zero_v4i64(<4 x i64>* %x) { -; LMULMAX8-RV32-LABEL: splat_zero_v4i64: -; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; LMULMAX8-RV32-NEXT: vmv.v.i v26, 0 -; LMULMAX8-RV32-NEXT: vse32.v v26, (a0) -; LMULMAX8-RV32-NEXT: ret +; LMULMAX8-LABEL: splat_zero_v4i64: +; LMULMAX8: # %bb.0: +; LMULMAX8-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; LMULMAX8-NEXT: vmv.v.i v26, 0 +; LMULMAX8-NEXT: vse64.v v26, (a0) +; LMULMAX8-NEXT: ret ; -; LMULMAX2-RV32-LABEL: splat_zero_v4i64: -; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; LMULMAX2-RV32-NEXT: vmv.v.i v26, 0 -; LMULMAX2-RV32-NEXT: vse32.v v26, (a0) -; LMULMAX2-RV32-NEXT: ret +; LMULMAX2-LABEL: splat_zero_v4i64: +; LMULMAX2: # %bb.0: +; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; LMULMAX2-NEXT: vmv.v.i v26, 0 +; LMULMAX2-NEXT: vse64.v v26, (a0) +; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_zero_v4i64: ; LMULMAX1-RV32: # %bb.0: @@ -458,20 +433,6 @@ ; LMULMAX1-RV32-NEXT: vse32.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; -; LMULMAX8-RV64-LABEL: splat_zero_v4i64: -; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX8-RV64-NEXT: vmv.v.i v26, 0 -; LMULMAX8-RV64-NEXT: vse64.v v26, (a0) -; LMULMAX8-RV64-NEXT: ret -; -; LMULMAX2-RV64-LABEL: splat_zero_v4i64: -; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX2-RV64-NEXT: vmv.v.i v26, 0 -; LMULMAX2-RV64-NEXT: vse64.v v26, (a0) -; LMULMAX2-RV64-NEXT: ret -; ; LMULMAX1-RV64-LABEL: 
splat_zero_v4i64: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu @@ -526,47 +487,12 @@ } define void @splat_allones_v2i64(<2 x i64>* %x) { -; LMULMAX8-RV32-LABEL: splat_allones_v2i64: -; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; LMULMAX8-RV32-NEXT: vmv.v.i v25, -1 -; LMULMAX8-RV32-NEXT: vse32.v v25, (a0) -; LMULMAX8-RV32-NEXT: ret -; -; LMULMAX2-RV32-LABEL: splat_allones_v2i64: -; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; LMULMAX2-RV32-NEXT: vmv.v.i v25, -1 -; LMULMAX2-RV32-NEXT: vse32.v v25, (a0) -; LMULMAX2-RV32-NEXT: ret -; -; LMULMAX1-RV32-LABEL: splat_allones_v2i64: -; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; LMULMAX1-RV32-NEXT: vmv.v.i v25, -1 -; LMULMAX1-RV32-NEXT: vse32.v v25, (a0) -; LMULMAX1-RV32-NEXT: ret -; -; LMULMAX8-RV64-LABEL: splat_allones_v2i64: -; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; LMULMAX8-RV64-NEXT: vmv.v.i v25, -1 -; LMULMAX8-RV64-NEXT: vse64.v v25, (a0) -; LMULMAX8-RV64-NEXT: ret -; -; LMULMAX2-RV64-LABEL: splat_allones_v2i64: -; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; LMULMAX2-RV64-NEXT: vmv.v.i v25, -1 -; LMULMAX2-RV64-NEXT: vse64.v v25, (a0) -; LMULMAX2-RV64-NEXT: ret -; -; LMULMAX1-RV64-LABEL: splat_allones_v2i64: -; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; LMULMAX1-RV64-NEXT: vmv.v.i v25, -1 -; LMULMAX1-RV64-NEXT: vse64.v v25, (a0) -; LMULMAX1-RV64-NEXT: ret +; CHECK-LABEL: splat_allones_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -1 +; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: ret %a = insertelement <2 x i64> undef, i64 -1, i32 0 %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer store <2 x i64> %b, <2 x i64>* %x @@ -663,19 +589,19 @@ } define void @splat_allones_v4i64(<4 x i64>* %x) { -; LMULMAX8-RV32-LABEL: splat_allones_v4i64: -; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; LMULMAX8-RV32-NEXT: vmv.v.i v26, -1 -; LMULMAX8-RV32-NEXT: vse32.v v26, (a0) -; LMULMAX8-RV32-NEXT: ret +; LMULMAX8-LABEL: splat_allones_v4i64: +; LMULMAX8: # %bb.0: +; LMULMAX8-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; LMULMAX8-NEXT: vmv.v.i v26, -1 +; LMULMAX8-NEXT: vse64.v v26, (a0) +; LMULMAX8-NEXT: ret ; -; LMULMAX2-RV32-LABEL: splat_allones_v4i64: -; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; LMULMAX2-RV32-NEXT: vmv.v.i v26, -1 -; LMULMAX2-RV32-NEXT: vse32.v v26, (a0) -; LMULMAX2-RV32-NEXT: ret +; LMULMAX2-LABEL: splat_allones_v4i64: +; LMULMAX2: # %bb.0: +; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; LMULMAX2-NEXT: vmv.v.i v26, -1 +; LMULMAX2-NEXT: vse64.v v26, (a0) +; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_allones_v4i64: ; LMULMAX1-RV32: # %bb.0: @@ -686,20 +612,6 @@ ; LMULMAX1-RV32-NEXT: vse32.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; -; LMULMAX8-RV64-LABEL: splat_allones_v4i64: -; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX8-RV64-NEXT: vmv.v.i v26, -1 -; LMULMAX8-RV64-NEXT: vse64.v v26, (a0) -; LMULMAX8-RV64-NEXT: ret -; -; LMULMAX2-RV64-LABEL: splat_allones_v4i64: -; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX2-RV64-NEXT: vmv.v.i v26, -1 -; LMULMAX2-RV64-NEXT: vse64.v v26, (a0) -; LMULMAX2-RV64-NEXT: ret -; ; LMULMAX1-RV64-LABEL: splat_allones_v4i64: ; LMULMAX1-RV64: # %bb.0: ; 
LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu @@ -719,27 +631,21 @@ ; FIXME: We should prevent this and use the implicit sign extension of vmv.v.x ; with SEW=64 on RV32. define void @splat_allones_with_use_v4i64(<4 x i64>* %x) { -; LMULMAX8-RV32-LABEL: splat_allones_with_use_v4i64: -; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX8-RV32-NEXT: vle64.v v26, (a0) -; LMULMAX8-RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; LMULMAX8-RV32-NEXT: vmv.v.i v28, -1 -; LMULMAX8-RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX8-RV32-NEXT: vadd.vv v26, v26, v28 -; LMULMAX8-RV32-NEXT: vse64.v v26, (a0) -; LMULMAX8-RV32-NEXT: ret +; LMULMAX8-LABEL: splat_allones_with_use_v4i64: +; LMULMAX8: # %bb.0: +; LMULMAX8-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; LMULMAX8-NEXT: vle64.v v26, (a0) +; LMULMAX8-NEXT: vadd.vi v26, v26, -1 +; LMULMAX8-NEXT: vse64.v v26, (a0) +; LMULMAX8-NEXT: ret ; -; LMULMAX2-RV32-LABEL: splat_allones_with_use_v4i64: -; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX2-RV32-NEXT: vle64.v v26, (a0) -; LMULMAX2-RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; LMULMAX2-RV32-NEXT: vmv.v.i v28, -1 -; LMULMAX2-RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX2-RV32-NEXT: vadd.vv v26, v26, v28 -; LMULMAX2-RV32-NEXT: vse64.v v26, (a0) -; LMULMAX2-RV32-NEXT: ret +; LMULMAX2-LABEL: splat_allones_with_use_v4i64: +; LMULMAX2: # %bb.0: +; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; LMULMAX2-NEXT: vle64.v v26, (a0) +; LMULMAX2-NEXT: vadd.vi v26, v26, -1 +; LMULMAX2-NEXT: vse64.v v26, (a0) +; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_allones_with_use_v4i64: ; LMULMAX1-RV32: # %bb.0: @@ -756,22 +662,6 @@ ; LMULMAX1-RV32-NEXT: vse64.v v26, (a1) ; LMULMAX1-RV32-NEXT: ret ; -; LMULMAX8-RV64-LABEL: splat_allones_with_use_v4i64: -; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX8-RV64-NEXT: vle64.v v26, (a0) -; LMULMAX8-RV64-NEXT: vadd.vi v26, v26, -1 -; LMULMAX8-RV64-NEXT: vse64.v v26, (a0) -; LMULMAX8-RV64-NEXT: ret -; -; LMULMAX2-RV64-LABEL: splat_allones_with_use_v4i64: -; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; LMULMAX2-RV64-NEXT: vle64.v v26, (a0) -; LMULMAX2-RV64-NEXT: vadd.vi v26, v26, -1 -; LMULMAX2-RV64-NEXT: vse64.v v26, (a0) -; LMULMAX2-RV64-NEXT: ret -; ; LMULMAX1-RV64-LABEL: splat_allones_with_use_v4i64: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu @@ -798,15 +688,13 @@ ; LMULMAX8-RV32: # %bb.0: ; LMULMAX8-RV32-NEXT: vsetivli a4, 16, e64,m8,ta,mu ; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX8-RV32-NEXT: lui a0, 349525 -; LMULMAX8-RV32-NEXT: addi a0, a0, 1365 -; LMULMAX8-RV32-NEXT: vsetivli a4, 1, e32,m1,ta,mu -; LMULMAX8-RV32-NEXT: vmv.s.x v0, a0 -; LMULMAX8-RV32-NEXT: addi a0, zero, 32 -; LMULMAX8-RV32-NEXT: vsetvli a0, a0, e32,m8,ta,mu ; LMULMAX8-RV32-NEXT: vmv.v.x v16, a2 -; LMULMAX8-RV32-NEXT: vmerge.vxm v16, v16, a1, v0 -; LMULMAX8-RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; LMULMAX8-RV32-NEXT: addi a0, zero, 32 +; LMULMAX8-RV32-NEXT: vsll.vx v16, v16, a0 +; LMULMAX8-RV32-NEXT: vmv.v.x v24, a1 +; LMULMAX8-RV32-NEXT: vsll.vx v24, v24, a0 +; LMULMAX8-RV32-NEXT: vsrl.vx v24, v24, a0 +; LMULMAX8-RV32-NEXT: vor.vv v16, v24, v16 ; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-RV32-NEXT: vse64.v v8, (a3) ; LMULMAX8-RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4 -; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4 -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1 -; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1 +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4,LMULMAX4-RV32 +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4,LMULMAX4-RV64 +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32 +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64 define void @gather_const_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: gather_const_v16i8: @@ -53,13 +53,61 @@ } define void @gather_const_v2i64(<2 x i64>* %x) { -; CHECK-LABEL: gather_const_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; CHECK-NEXT: vle64.v v25, (a0) -; CHECK-NEXT: vrgather.vi v26, v25, 1 -; CHECK-NEXT: vse64.v v26, (a0) -; CHECK-NEXT: ret +; LMULMAX4-RV32-LABEL: gather_const_v2i64: +; LMULMAX4-RV32: # %bb.0: +; LMULMAX4-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; LMULMAX4-RV32-NEXT: vle64.v v25, (a0) +; LMULMAX4-RV32-NEXT: vsetivli a1, 1, e64,m1,ta,mu +; LMULMAX4-RV32-NEXT: vslidedown.vi v25, v25, 1 +; LMULMAX4-RV32-NEXT: vmv.x.s a1, v25 +; LMULMAX4-RV32-NEXT: addi a2, zero, 32 +; LMULMAX4-RV32-NEXT: vsrl.vx v25, v25, a2 +; LMULMAX4-RV32-NEXT: vmv.x.s a3, v25 +; LMULMAX4-RV32-NEXT: vsetivli a4, 2, e64,m1,ta,mu +; LMULMAX4-RV32-NEXT: vmv.v.x v25, a3 +; LMULMAX4-RV32-NEXT: vsll.vx v25, v25, a2 +; LMULMAX4-RV32-NEXT: vmv.v.x v26, a1 +; LMULMAX4-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX4-RV32-NEXT: vsrl.vx v26, v26, a2 +; LMULMAX4-RV32-NEXT: vor.vv v25, v26, v25 +; LMULMAX4-RV32-NEXT: vse64.v v25, (a0) +; LMULMAX4-RV32-NEXT: ret +; +; LMULMAX4-RV64-LABEL: gather_const_v2i64: +; LMULMAX4-RV64: # %bb.0: +; LMULMAX4-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; LMULMAX4-RV64-NEXT: vle64.v v25, (a0) +; LMULMAX4-RV64-NEXT: vrgather.vi v26, v25, 1 +; LMULMAX4-RV64-NEXT: vse64.v v26, (a0) +; LMULMAX4-RV64-NEXT: ret +; +; LMULMAX1-RV32-LABEL: gather_const_v2i64: +; LMULMAX1-RV32: # %bb.0: +; 
LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; LMULMAX1-RV32-NEXT: vle64.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsetivli a1, 1, e64,m1,ta,mu +; LMULMAX1-RV32-NEXT: vslidedown.vi v25, v25, 1 +; LMULMAX1-RV32-NEXT: vmv.x.s a1, v25 +; LMULMAX1-RV32-NEXT: addi a2, zero, 32 +; LMULMAX1-RV32-NEXT: vsrl.vx v25, v25, a2 +; LMULMAX1-RV32-NEXT: vmv.x.s a3, v25 +; LMULMAX1-RV32-NEXT: vsetivli a4, 2, e64,m1,ta,mu +; LMULMAX1-RV32-NEXT: vmv.v.x v25, a3 +; LMULMAX1-RV32-NEXT: vsll.vx v25, v25, a2 +; LMULMAX1-RV32-NEXT: vmv.v.x v26, a1 +; LMULMAX1-RV32-NEXT: vsll.vx v26, v26, a2 +; LMULMAX1-RV32-NEXT: vsrl.vx v26, v26, a2 +; LMULMAX1-RV32-NEXT: vor.vv v25, v26, v25 +; LMULMAX1-RV32-NEXT: vse64.v v25, (a0) +; LMULMAX1-RV32-NEXT: ret +; +; LMULMAX1-RV64-LABEL: gather_const_v2i64: +; LMULMAX1-RV64: # %bb.0: +; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; LMULMAX1-RV64-NEXT: vle64.v v25, (a0) +; LMULMAX1-RV64-NEXT: vrgather.vi v26, v25, 1 +; LMULMAX1-RV64-NEXT: vse64.v v26, (a0) +; LMULMAX1-RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = extractelement <2 x i64> %a, i32 1 %c = insertelement <2 x i64> undef, i64 %b, i32 0 @@ -162,13 +210,33 @@ } define void @gather_const_v8i64(<8 x i64>* %x) { -; LMULMAX4-LABEL: gather_const_v8i64: -; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli a1, 8, e64,m4,ta,mu -; LMULMAX4-NEXT: vle64.v v28, (a0) -; LMULMAX4-NEXT: vrgather.vi v8, v28, 3 -; LMULMAX4-NEXT: vse64.v v8, (a0) -; LMULMAX4-NEXT: ret +; LMULMAX4-RV32-LABEL: gather_const_v8i64: +; LMULMAX4-RV32: # %bb.0: +; LMULMAX4-RV32-NEXT: vsetivli a1, 8, e64,m4,ta,mu +; LMULMAX4-RV32-NEXT: vle64.v v28, (a0) +; LMULMAX4-RV32-NEXT: vsetivli a1, 1, e64,m4,ta,mu +; LMULMAX4-RV32-NEXT: vslidedown.vi v28, v28, 3 +; LMULMAX4-RV32-NEXT: vmv.x.s a1, v28 +; LMULMAX4-RV32-NEXT: addi a2, zero, 32 +; LMULMAX4-RV32-NEXT: vsrl.vx v28, v28, a2 +; LMULMAX4-RV32-NEXT: vmv.x.s a3, v28 +; LMULMAX4-RV32-NEXT: vsetivli a4, 8, e64,m4,ta,mu +; LMULMAX4-RV32-NEXT: vmv.v.x v28, a3 +; LMULMAX4-RV32-NEXT: vsll.vx v28, v28, a2 +; LMULMAX4-RV32-NEXT: vmv.v.x v8, a1 +; LMULMAX4-RV32-NEXT: vsll.vx v8, v8, a2 +; LMULMAX4-RV32-NEXT: vsrl.vx v8, v8, a2 +; LMULMAX4-RV32-NEXT: vor.vv v28, v8, v28 +; LMULMAX4-RV32-NEXT: vse64.v v28, (a0) +; LMULMAX4-RV32-NEXT: ret +; +; LMULMAX4-RV64-LABEL: gather_const_v8i64: +; LMULMAX4-RV64: # %bb.0: +; LMULMAX4-RV64-NEXT: vsetivli a1, 8, e64,m4,ta,mu +; LMULMAX4-RV64-NEXT: vle64.v v28, (a0) +; LMULMAX4-RV64-NEXT: vrgather.vi v8, v28, 3 +; LMULMAX4-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX4-RV64-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v8i64: ; LMULMAX1: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -5622,24 +5622,13 @@ } define void @add_vi_v2i64(<2 x i64>* %x) { -; RV32-LABEL: add_vi_v2i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a0) -; RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, -1 -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vadd.vv v25, v25, v26 -; RV32-NEXT: vse64.v v25, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: add_vi_v2i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a0) -; RV64-NEXT: vadd.vi v25, v25, -1 -; RV64-NEXT: vse64.v v25, (a0) -; RV64-NEXT: ret +; CHECK-LABEL: add_vi_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a0) +; CHECK-NEXT: 
vadd.vi v25, v25, -1 +; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -1, i32 0 %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer @@ -5860,25 +5849,14 @@ } define void @sub_vi_v2i64(<2 x i64>* %x) { -; RV32-LABEL: sub_vi_v2i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a0) -; RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, -1 -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vsub.vv v25, v25, v26 -; RV32-NEXT: vse64.v v25, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: sub_vi_v2i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a0) -; RV64-NEXT: addi a1, zero, -1 -; RV64-NEXT: vsub.vx v25, v25, a1 -; RV64-NEXT: vse64.v v25, (a0) -; RV64-NEXT: ret +; CHECK-LABEL: sub_vi_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a0) +; CHECK-NEXT: addi a1, zero, -1 +; CHECK-NEXT: vsub.vx v25, v25, a1 +; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -1, i32 0 %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer @@ -6640,24 +6618,13 @@ } define void @xor_vi_v2i64(<2 x i64>* %x) { -; RV32-LABEL: xor_vi_v2i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a0) -; RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, -1 -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vxor.vv v25, v25, v26 -; RV32-NEXT: vse64.v v25, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: xor_vi_v2i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a0) -; RV64-NEXT: vxor.vi v25, v25, -1 -; RV64-NEXT: vse64.v v25, (a0) -; RV64-NEXT: ret +; CHECK-LABEL: xor_vi_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a0) +; CHECK-NEXT: vxor.vi v25, v25, -1 +; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -1, i32 0 %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer @@ -7437,16 +7404,16 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu ; RV32-NEXT: vle64.v v25, (a0) -; RV32-NEXT: addi a1, zero, 5 -; RV32-NEXT: vsetivli a2, 1, e8,m1,ta,mu -; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: lui a1, 699051 ; RV32-NEXT: addi a2, a1, -1366 -; RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu ; RV32-NEXT: vmv.v.x v26, a2 +; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: vsll.vx v26, v26, a2 ; RV32-NEXT: addi a1, a1, -1365 -; RV32-NEXT: vmerge.vxm v26, v26, a1, v0 -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu +; RV32-NEXT: vmv.v.x v27, a1 +; RV32-NEXT: vsll.vx v27, v27, a2 +; RV32-NEXT: vsrl.vx v27, v27, a2 +; RV32-NEXT: vor.vv v26, v27, v26 ; RV32-NEXT: vmulhu.vv v25, v25, v26 ; RV32-NEXT: vsrl.vi v25, v25, 1 ; RV32-NEXT: vse64.v v25, (a0) @@ -7559,16 +7526,16 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu ; RV32-NEXT: vle64.v v25, (a0) -; RV32-NEXT: addi a1, zero, 5 -; RV32-NEXT: vsetivli a2, 1, e8,m1,ta,mu -; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: lui a1, 349525 ; RV32-NEXT: addi a2, a1, 1365 -; RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu ; RV32-NEXT: vmv.v.x v26, a2 +; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: vsll.vx v26, v26, a2 ; RV32-NEXT: addi a1, a1, 1366 -; RV32-NEXT: vmerge.vxm v26, v26, a1, v0 -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu 
+; RV32-NEXT: vmv.v.x v27, a1 +; RV32-NEXT: vsll.vx v27, v27, a2 +; RV32-NEXT: vsrl.vx v27, v27, a2 +; RV32-NEXT: vor.vv v26, v27, v26 ; RV32-NEXT: vmulh.vv v25, v25, v26 ; RV32-NEXT: addi a1, zero, 63 ; RV32-NEXT: vsrl.vx v26, v25, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll @@ -54,26 +54,14 @@ declare <1 x i32> @llvm.masked.load.v1i32(<1 x i32>*, i32, <1 x i1>, <1 x i32>) define void @masked_load_v1i64(<1 x i64>* %a, <1 x i64>* %m_ptr, <1 x i64>* %res_ptr) nounwind { -; RV32-LABEL: masked_load_v1i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 1, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a1) -; RV32-NEXT: vsetivli a1, 2, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, 0 -; RV32-NEXT: vsetivli a1, 1, e64,m1,ta,mu -; RV32-NEXT: vmseq.vv v0, v25, v26 -; RV32-NEXT: vle64.v v25, (a0), v0.t -; RV32-NEXT: vse64.v v25, (a2) -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_v1i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 1, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a1) -; RV64-NEXT: vmseq.vi v0, v25, 0 -; RV64-NEXT: vle64.v v25, (a0), v0.t -; RV64-NEXT: vse64.v v25, (a2) -; RV64-NEXT: ret +; CHECK-LABEL: masked_load_v1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle64.v v25, (a0), v0.t +; CHECK-NEXT: vse64.v v25, (a2) +; CHECK-NEXT: ret %m = load <1 x i64>, <1 x i64>* %m_ptr %mask = icmp eq <1 x i64> %m, zeroinitializer %load = call <1 x i64> @llvm.masked.load.v1i64(<1 x i64>* %a, i32 8, <1 x i1> %mask, <1 x i64> undef) @@ -134,26 +122,14 @@ declare <2 x i32> @llvm.masked.load.v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>) define void @masked_load_v2i64(<2 x i64>* %a, <2 x i64>* %m_ptr, <2 x i64>* %res_ptr) nounwind { -; RV32-LABEL: masked_load_v2i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a1) -; RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, 0 -; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu -; RV32-NEXT: vmseq.vv v0, v25, v26 -; RV32-NEXT: vle64.v v25, (a0), v0.t -; RV32-NEXT: vse64.v v25, (a2) -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_v2i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 2, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a1) -; RV64-NEXT: vmseq.vi v0, v25, 0 -; RV64-NEXT: vle64.v v25, (a0), v0.t -; RV64-NEXT: vse64.v v25, (a2) -; RV64-NEXT: ret +; CHECK-LABEL: masked_load_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle64.v v25, (a0), v0.t +; CHECK-NEXT: vse64.v v25, (a2) +; CHECK-NEXT: ret %m = load <2 x i64>, <2 x i64>* %m_ptr %mask = icmp eq <2 x i64> %m, zeroinitializer %load = call <2 x i64> @llvm.masked.load.v2i64(<2 x i64>* %a, i32 8, <2 x i1> %mask, <2 x i64> undef) @@ -214,26 +190,14 @@ declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>) define void @masked_load_v4i64(<4 x i64>* %a, <4 x i64>* %m_ptr, <4 x i64>* %res_ptr) nounwind { -; RV32-LABEL: masked_load_v4i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu -; RV32-NEXT: vle64.v v26, (a1) -; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu -; RV32-NEXT: vmv.v.i v28, 0 -; RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu -; RV32-NEXT: vmseq.vv v0, v26, v28 -; RV32-NEXT: vle64.v v26, (a0), v0.t -; 
RV32-NEXT: vse64.v v26, (a2) -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_v4i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 4, e64,m2,ta,mu -; RV64-NEXT: vle64.v v26, (a1) -; RV64-NEXT: vmseq.vi v0, v26, 0 -; RV64-NEXT: vle64.v v26, (a0), v0.t -; RV64-NEXT: vse64.v v26, (a2) -; RV64-NEXT: ret +; CHECK-LABEL: masked_load_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; CHECK-NEXT: vle64.v v26, (a1) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vle64.v v26, (a0), v0.t +; CHECK-NEXT: vse64.v v26, (a2) +; CHECK-NEXT: ret %m = load <4 x i64>, <4 x i64>* %m_ptr %mask = icmp eq <4 x i64> %m, zeroinitializer %load = call <4 x i64> @llvm.masked.load.v4i64(<4 x i64>* %a, i32 8, <4 x i1> %mask, <4 x i64> undef) @@ -294,26 +258,14 @@ declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>) define void @masked_load_v8i64(<8 x i64>* %a, <8 x i64>* %m_ptr, <8 x i64>* %res_ptr) nounwind { -; RV32-LABEL: masked_load_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 8, e64,m4,ta,mu -; RV32-NEXT: vle64.v v28, (a1) -; RV32-NEXT: vsetivli a1, 16, e32,m4,ta,mu -; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vsetivli a1, 8, e64,m4,ta,mu -; RV32-NEXT: vmseq.vv v0, v28, v8 -; RV32-NEXT: vle64.v v28, (a0), v0.t -; RV32-NEXT: vse64.v v28, (a2) -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 8, e64,m4,ta,mu -; RV64-NEXT: vle64.v v28, (a1) -; RV64-NEXT: vmseq.vi v0, v28, 0 -; RV64-NEXT: vle64.v v28, (a0), v0.t -; RV64-NEXT: vse64.v v28, (a2) -; RV64-NEXT: ret +; CHECK-LABEL: masked_load_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a1) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vle64.v v28, (a0), v0.t +; CHECK-NEXT: vse64.v v28, (a2) +; CHECK-NEXT: ret %m = load <8 x i64>, <8 x i64>* %m_ptr %mask = icmp eq <8 x i64> %m, zeroinitializer %load = call <8 x i64> @llvm.masked.load.v8i64(<8 x i64>* %a, i32 8, <8 x i1> %mask, <8 x i64> undef) @@ -374,27 +326,14 @@ declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>) define void @masked_load_v16i64(<16 x i64>* %a, <16 x i64>* %m_ptr, <16 x i64>* %res_ptr) nounwind { -; RV32-LABEL: masked_load_v16i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu -; RV32-NEXT: vle64.v v8, (a1) -; RV32-NEXT: addi a1, zero, 32 -; RV32-NEXT: vsetvli a1, a1, e32,m8,ta,mu -; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: vsetivli a1, 16, e64,m8,ta,mu -; RV32-NEXT: vmseq.vv v0, v8, v16 -; RV32-NEXT: vle64.v v8, (a0), v0.t -; RV32-NEXT: vse64.v v8, (a2) -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_v16i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu -; RV64-NEXT: vle64.v v8, (a1) -; RV64-NEXT: vmseq.vi v0, v8, 0 -; RV64-NEXT: vle64.v v8, (a0), v0.t -; RV64-NEXT: vse64.v v8, (a2) -; RV64-NEXT: ret +; CHECK-LABEL: masked_load_v16i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: vse64.v v8, (a2) +; CHECK-NEXT: ret %m = load <16 x i64>, <16 x i64>* %m_ptr %mask = icmp eq <16 x i64> %m, zeroinitializer %load = call <16 x i64> @llvm.masked.load.v16i64(<16 x i64>* %a, i32 8, <16 x i1> %mask, <16 x i64> undef) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll @@ -54,26 +54,14 @@ declare void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>, <1 x i32>*, i32, <1 x i1>) define void @masked_store_v1i64(<1 x i64>* %val_ptr, <1 x i64>* %a, <1 x i64>* %m_ptr) nounwind { -; RV32-LABEL: masked_store_v1i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 1, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a2) -; RV32-NEXT: vsetivli a2, 2, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, 0 -; RV32-NEXT: vsetivli a2, 1, e64,m1,ta,mu -; RV32-NEXT: vle64.v v27, (a0) -; RV32-NEXT: vmseq.vv v0, v25, v26 -; RV32-NEXT: vse64.v v27, (a1), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_v1i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 1, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a2) -; RV64-NEXT: vle64.v v26, (a0) -; RV64-NEXT: vmseq.vi v0, v25, 0 -; RV64-NEXT: vse64.v v26, (a1), v0.t -; RV64-NEXT: ret +; CHECK-LABEL: masked_store_v1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a2) +; CHECK-NEXT: vle64.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse64.v v26, (a1), v0.t +; CHECK-NEXT: ret %m = load <1 x i64>, <1 x i64>* %m_ptr %mask = icmp eq <1 x i64> %m, zeroinitializer %val = load <1 x i64>, <1 x i64>* %val_ptr @@ -134,26 +122,14 @@ declare void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>) define void @masked_store_v2i64(<2 x i64>* %val_ptr, <2 x i64>* %a, <2 x i64>* %m_ptr) nounwind { -; RV32-LABEL: masked_store_v2i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu -; RV32-NEXT: vle64.v v25, (a2) -; RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu -; RV32-NEXT: vmv.v.i v26, 0 -; RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu -; RV32-NEXT: vle64.v v27, (a0) -; RV32-NEXT: vmseq.vv v0, v25, v26 -; RV32-NEXT: vse64.v v27, (a1), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_v2i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 2, e64,m1,ta,mu -; RV64-NEXT: vle64.v v25, (a2) -; RV64-NEXT: vle64.v v26, (a0) -; RV64-NEXT: vmseq.vi v0, v25, 0 -; RV64-NEXT: vse64.v v26, (a1), v0.t -; RV64-NEXT: ret +; CHECK-LABEL: masked_store_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; CHECK-NEXT: vle64.v v25, (a2) +; CHECK-NEXT: vle64.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse64.v v26, (a1), v0.t +; CHECK-NEXT: ret %m = load <2 x i64>, <2 x i64>* %m_ptr %mask = icmp eq <2 x i64> %m, zeroinitializer %val = load <2 x i64>, <2 x i64>* %val_ptr @@ -214,26 +190,14 @@ declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>) define void @masked_store_v4i64(<4 x i64>* %val_ptr, <4 x i64>* %a, <4 x i64>* %m_ptr) nounwind { -; RV32-LABEL: masked_store_v4i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu -; RV32-NEXT: vle64.v v26, (a2) -; RV32-NEXT: vsetivli a2, 8, e32,m2,ta,mu -; RV32-NEXT: vmv.v.i v28, 0 -; RV32-NEXT: vsetivli a2, 4, e64,m2,ta,mu -; RV32-NEXT: vle64.v v30, (a0) -; RV32-NEXT: vmseq.vv v0, v26, v28 -; RV32-NEXT: vse64.v v30, (a1), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_v4i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 4, e64,m2,ta,mu -; RV64-NEXT: vle64.v v26, (a2) -; RV64-NEXT: vle64.v v28, (a0) -; RV64-NEXT: vmseq.vi v0, v26, 0 -; RV64-NEXT: vse64.v v28, (a1), v0.t -; RV64-NEXT: ret +; CHECK-LABEL: masked_store_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; CHECK-NEXT: vle64.v v26, (a2) +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vse64.v v28, (a1), v0.t +; 
CHECK-NEXT: ret %m = load <4 x i64>, <4 x i64>* %m_ptr %mask = icmp eq <4 x i64> %m, zeroinitializer %val = load <4 x i64>, <4 x i64>* %val_ptr @@ -294,26 +258,14 @@ declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>) define void @masked_store_v8i64(<8 x i64>* %val_ptr, <8 x i64>* %a, <8 x i64>* %m_ptr) nounwind { -; RV32-LABEL: masked_store_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 8, e64,m4,ta,mu -; RV32-NEXT: vle64.v v28, (a2) -; RV32-NEXT: vsetivli a2, 16, e32,m4,ta,mu -; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vsetivli a2, 8, e64,m4,ta,mu -; RV32-NEXT: vle64.v v12, (a0) -; RV32-NEXT: vmseq.vv v0, v28, v8 -; RV32-NEXT: vse64.v v12, (a1), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 8, e64,m4,ta,mu -; RV64-NEXT: vle64.v v28, (a2) -; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vmseq.vi v0, v28, 0 -; RV64-NEXT: vse64.v v8, (a1), v0.t -; RV64-NEXT: ret +; CHECK-LABEL: masked_store_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a2) +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vse64.v v8, (a1), v0.t +; CHECK-NEXT: ret %m = load <8 x i64>, <8 x i64>* %m_ptr %mask = icmp eq <8 x i64> %m, zeroinitializer %val = load <8 x i64>, <8 x i64>* %val_ptr @@ -374,27 +326,14 @@ declare void @llvm.masked.store.v16i32.p0v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>) define void @masked_store_v16i64(<16 x i64>* %val_ptr, <16 x i64>* %a, <16 x i64>* %m_ptr) nounwind { -; RV32-LABEL: masked_store_v16i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu -; RV32-NEXT: vle64.v v8, (a2) -; RV32-NEXT: addi a2, zero, 32 -; RV32-NEXT: vsetvli a2, a2, e32,m8,ta,mu -; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: vsetivli a2, 16, e64,m8,ta,mu -; RV32-NEXT: vle64.v v24, (a0) -; RV32-NEXT: vmseq.vv v0, v8, v16 -; RV32-NEXT: vse64.v v24, (a1), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_v16i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu -; RV64-NEXT: vle64.v v8, (a2) -; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vmseq.vi v0, v8, 0 -; RV64-NEXT: vse64.v v16, (a1), v0.t -; RV64-NEXT: ret +; CHECK-LABEL: masked_store_v16i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a2) +; CHECK-NEXT: vle64.v v16, (a0) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vse64.v v16, (a1), v0.t +; CHECK-NEXT: ret %m = load <16 x i64>, <16 x i64>* %m_ptr %mask = icmp eq <16 x i64> %m, zeroinitializer %val = load <16 x i64>, <16 x i64>* %val_ptr
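Reviewer note (not part of the patch): the splatPartsI64WithVL change in RISCVISelLowering.cpp encodes two facts about splatting an i64 built from two 32-bit halves on RV32. The general path (vmv.v.x/vsll.vx on Hi, then vmv.v.x/vsll.vx/vsrl.vx on Lo, then vor.vv) materializes (Hi << 32) | zext(Lo) per element; the constant fast path fires when Hi is just the sign-extension of Lo, in which case a single sign-extending vmv.v.x with SEW=64 already produces the full 64-bit value. A minimal standalone C++ sketch of that per-element arithmetic (illustrative names only, not code from the patch):

#include <cassert>
#include <cstdint>

// Scalar model of the vmv.v.x/vsll.vx/vsrl.vx/vor.vv sequence: the Hi splat is
// shifted into the upper half, the Lo splat is zero-extended by the
// shift-left/shift-right pair, and the two halves are OR'd together.
static uint64_t splatPartsElement(int32_t Lo, int32_t Hi) {
  uint64_t HiPart = static_cast<uint64_t>(static_cast<uint32_t>(Hi)) << 32;
  uint64_t LoPart = static_cast<uint32_t>(Lo); // vsll 32 then vsrl 32 == zext
  return HiPart | LoPart;
}

// Guard used by the constant fast path: Hi must be the sign bit of Lo
// replicated, i.e. the i64 value is sext(Lo), which one vmv.v.x with SEW=64
// reproduces because it sign-extends its 32-bit scalar operand.
static bool hiIsSignExtensionOfLo(int32_t Lo, int32_t Hi) {
  return (Lo >> 31) == Hi; // same arithmetic-shift check as the patch
}

int main() {
  // 0xFFFFFFFF80000000: Lo = INT32_MIN, Hi = -1 -> single vmv.v.x suffices.
  assert(hiIsSignExtensionOfLo(INT32_MIN, -1));
  assert(splatPartsElement(INT32_MIN, -1) == 0xFFFFFFFF80000000ULL);
  // 0x0000000180000000: Lo = INT32_MIN, Hi = 1 -> needs the shift/or sequence.
  assert(!hiIsSignExtensionOfLo(INT32_MIN, 1));
  assert(splatPartsElement(INT32_MIN, 1) == 0x0000000180000000ULL);
  return 0;
}

This is also why the test diffs above replace the old SEW=32 vmerge.vxm idiom with either the SEW=64 shift/or sequence for a runtime i64 splat, or, for sign-extended constants such as -1 and 0, a plain vmv.v.i at SEW=64 that then lets vadd.vi/vmseq.vi and similar vector-immediate forms match.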