Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -7777,7 +7777,7 @@
   EVT StVT = ST->getMemoryVT();
 
   if (StVT.isScalableVector())
-    report_fatal_error("Cannot scalarize scalable vector stores");
+    return SDValue();
 
   // The type of the data we want to save
   EVT RegVT = Value.getValueType();
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1217,7 +1217,7 @@
     // Firstly, exclude all scalable vector extending loads/truncating stores.
     for (MVT VT : MVT::integer_scalable_vector_valuetypes()) {
      for (MVT InnerVT : MVT::integer_scalable_vector_valuetypes()) {
-        // TODO: truncating stores should also be exclude
+        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
@@ -1225,6 +1225,12 @@
    }
 
    // Then, selectively enable those which we directly support.
+    setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal);
+    setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal);
+    setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal);
+    setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal);
+    setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal);
+    setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal);
    for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
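
For illustration only (not part of the diff): a minimal LLVM IR sketch, with assumed names, of the pattern these hunks legalize. The trunc feeding the store is combined into a truncating-store node; with the setTruncStoreAction(..., Legal) entries above, AArch64 can select it directly (e.g. as an SVE st1b), and scalarizeVectorStore now bails out with an empty SDValue rather than calling report_fatal_error for scalable types.

  ; Hypothetical input, not taken from the patch's test files.
  define void @st1b_trunc(<vscale x 8 x i16> %val, <vscale x 8 x i8>* %dst) {
    ; DAGCombine folds the trunc into a truncating store of nxv8i8, which
    ; setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal) now accepts.
    %tr = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
    store <vscale x 8 x i8> %tr, <vscale x 8 x i8>* %dst
    ret void
  }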