diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5162,16 +5162,20 @@
     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
 
     MVT VT = Op->getSimpleValueType(0);
-    MVT ContainerVT = getContainerForFixedLengthVector(VT);
+    MVT ContainerVT = VT;
+    if (VT.isFixedLengthVector())
+      ContainerVT = getContainerForFixedLengthVector(VT);
 
     SDValue PassThru = Op.getOperand(2);
     if (!IsUnmasked) {
       MVT MaskVT = getMaskTypeFor(ContainerVT);
-      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
-      PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
+      if (VT.isFixedLengthVector()) {
+        Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+        PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
+      }
     }
 
-    SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
+    SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
     SDValue IntID = DAG.getTargetConstant(
         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
@@ -5198,7 +5202,8 @@
         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
                                 Load->getMemoryVT(), Load->getMemOperand());
     SDValue Chain = Result.getValue(1);
-    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+    if (VT.isFixedLengthVector())
+      Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
     return DAG.getMergeValues({Result, Chain}, DL);
   }
   case Intrinsic::riscv_seg2_load:
@@ -5262,15 +5267,18 @@
     SDValue Val = Op.getOperand(2);
     MVT VT = Val.getSimpleValueType();
-    MVT ContainerVT = getContainerForFixedLengthVector(VT);
-
-    Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
+    MVT ContainerVT = VT;
+    if (VT.isFixedLengthVector()) {
+      ContainerVT = getContainerForFixedLengthVector(VT);
+      Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
+    }
 
     if (!IsUnmasked) {
       MVT MaskVT = getMaskTypeFor(ContainerVT);
-      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+      if (VT.isFixedLengthVector())
+        Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
     }
 
-    SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
+    SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
     SDValue IntID = DAG.getTargetConstant(
         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
@@ -110,3 +110,26 @@
   ret void
 }
 
+declare void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>)
+
+declare <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>)
+
+define <vscale x 1 x i64> @strided_load_vscale_i64(ptr %p, i64 %stride, <vscale x 1 x i1> %m) {
+; CHECK-LABEL: strided_load_vscale_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
+  ret <vscale x 1 x i64> %res
+}
+
+define void @strided_store_vscale_i64(ptr %p, <vscale x 1 x i64> %v, i64 %stride, <vscale x 1 x i1> %m) {
+; CHECK-LABEL: strided_store_vscale_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64> %v, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
+  ret void
+}