Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -5193,16 +5193,20 @@ bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); MVT VT = Op->getSimpleValueType(0); - MVT ContainerVT = getContainerForFixedLengthVector(VT); + MVT ContainerVT = VT; + if (VT.isFixedLengthVector()) + ContainerVT = getContainerForFixedLengthVector(VT); SDValue PassThru = Op.getOperand(2); if (!IsUnmasked) { MVT MaskVT = getMaskTypeFor(ContainerVT); - Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); - PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); + if (VT.isFixedLengthVector()) { + Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); + PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); + } } - SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); + SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; SDValue IntID = DAG.getTargetConstant( IsUnmasked ? 
Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL, @@ -5229,7 +5233,8 @@ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, Load->getMemoryVT(), Load->getMemOperand()); SDValue Chain = Result.getValue(1); - Result = convertFromScalableVector(VT, Result, DAG, Subtarget); + if (VT.isFixedLengthVector()) + Result = convertFromScalableVector(VT, Result, DAG, Subtarget); return DAG.getMergeValues({Result, Chain}, DL); } case Intrinsic::riscv_seg2_load: @@ -5293,15 +5298,18 @@ SDValue Val = Op.getOperand(2); MVT VT = Val.getSimpleValueType(); - MVT ContainerVT = getContainerForFixedLengthVector(VT); - - Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); + MVT ContainerVT = VT; + if (VT.isFixedLengthVector()) { + ContainerVT = getContainerForFixedLengthVector(VT); + Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); + } if (!IsUnmasked) { MVT MaskVT = getMaskTypeFor(ContainerVT); - Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); + if (VT.isFixedLengthVector()) + Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); } - SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); + SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; SDValue IntID = DAG.getTargetConstant( IsUnmasked ? 
Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
Index: llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
+++ llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
@@ -110,3 +110,26 @@
   ret void
 }
 
+declare void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>)
+
+declare <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>)
+
+define <vscale x 1 x i64> @strided_load_vscale_i64(ptr %p, i64 %stride, <vscale x 1 x i1> %m) {
+; CHECK-LABEL: strided_load_vscale_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
+  ret <vscale x 1 x i64> %res
+}
+
+define void @strided_store_vscale_i64(ptr %p, <vscale x 1 x i64> %v, i64 %stride, <vscale x 1 x i1> %m) {
+; CHECK-LABEL: strided_store_vscale_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64> %v, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
+  ret void
+}