diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4741,25 +4741,37 @@
     Mask = MStore->getMask();
   }
 
+  // Detect an all-ones mask so the store can be lowered to the cheaper
+  // unmasked vse intrinsic (no mask operand, no v0 register pressure).
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
   MVT VT = Val.getSimpleValueType();
   MVT XLenVT = Subtarget.getXLenVT();
 
   MVT ContainerVT = VT;
   if (VT.isFixedLengthVector()) {
     ContainerVT = getContainerForFixedLengthVector(VT);
-    MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
 
     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
-    Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    if (!IsUnmasked) {
+      // Only materialize the scalable mask when it is actually consumed.
+      MVT MaskVT =
+          MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
   }
 
   if (!VL)
     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
-  SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
-  return DAG.getMemIntrinsicNode(
-      ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
-      {Chain, IntID, Val, BasePtr, Mask, VL}, MemVT, MMO);
+  unsigned IntID =
+      IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
+  // Operand order: chain, intrinsic id, value, base pointer,
+  // [mask (masked form only)], VL.
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
+  Ops.push_back(Val);
+  Ops.push_back(BasePtr);
+  if (!IsUnmasked)
+    Ops.push_back(Mask);
+  Ops.push_back(VL);
+
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
+                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
 }
 
 SDValue
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
@@ -255,3 +255,15 @@
   call void @llvm.vp.store.v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
+
+define void @vpstore_v2i8_allones_mask(<2 x i8> %val, <2 x i8>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpstore_v2i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = insertelement <2 x i1> undef, i1 true, i32 0
+  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
+  call void @llvm.vp.store.v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %b, i32 %evl)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -339,3 +339,15 @@
   call void @llvm.vp.store.nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
+
+define void @vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpstore_nxv1i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  call void @llvm.vp.store.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  ret void
+}