Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -146,6 +146,7 @@
     OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
     return true;
   }
+  bool SelectAddrModePred(SDValue N, SDValue &Base, SDValue &OffImm);
 
   template<int Width>
   bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
@@ -993,6 +994,29 @@
   return true;
 }
 
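+/// SelectAddrModePred - Select an address for an SVE predicate load/store
+/// (LDR_PXI/STR_PXI): a plain base register or frame index, always paired
+/// with a zero immediate offset.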
+bool AArch64DAGToDAGISel::SelectAddrModePred(SDValue N, SDValue &Base,
+                                             SDValue &OffImm) {
+  SDLoc dl(N);
+
+  // If this is not a frame index, use the address as-is with a zero offset.
+  if (N->getOpcode() != ISD::FrameIndex) {
+    Base = N;
+    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
+    return true;
+  }
+
+  // Otherwise, select the corresponding TargetFrameIndex as the base.
+  const DataLayout &DL = CurDAG->getDataLayout();
+  const TargetLowering *TLI = getTargetLowering();
+  int FI = cast<FrameIndexSDNode>(N)->getIndex();
+  Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
+  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
+  return true;
+}
+
 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
 /// immediate" address.  This should only match when there is an offset that
 /// is not valid for a scaled immediate addressing mode.  The "Size" argument
Index: llvm/lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -3909,6 +3909,10 @@
 def am_unscaled64 : ComplexPattern<iPTR, 2, "SelectAddrModeUnscaled64", []>;
 def am_unscaled128 :ComplexPattern<iPTR, 2, "SelectAddrModeUnscaled128", []>;
 
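+// Matches a register or frame-index base for SVE predicate spills and fills;
+// SelectAddrModePred always returns a zero immediate offset.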
+def am_sve_pred : ComplexPattern<iPTR, 2, "SelectAddrModePred", []>;
+
 def gi_am_unscaled8 :
     GIComplexOperandMatcher<s64, "selectAddrModeUnscaled8">,
     GIComplexPatternEquiv<am_unscaled8>;
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2314,6 +2314,17 @@
   def : Pat<(nxv2i1 (and PPR:$Ps1, PPR:$Ps2)),
             (AND_PPzPP (PTRUE_D 31), PPR:$Ps1, PPR:$Ps2)>;
 
+  // Patterns for unpredicated loads and stores of SVE predicate registers.
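+  // am_sve_pred currently always yields a zero immediate offset.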
+  foreach type = ["nxv16i1", "nxv8i1", "nxv4i1", "nxv2i1"] in {
+    def : Pat<(!cast<ValueType>(type)
+                  (load (am_sve_pred GPR64sp:$base, simm9:$offset))),
+              (LDR_PXI GPR64sp:$base, simm9:$offset)>;
+    def : Pat<(store (!cast<ValueType>(type) PPR:$val),
+                     (am_sve_pred GPR64sp:$base, simm9:$offset)),
+              (STR_PXI PPR:$val, GPR64sp:$base, simm9:$offset)>;
+  }
+
   // Add more complex addressing modes here as required
   multiclass pred_load<ValueType Ty, ValueType PredTy, SDPatternOperator Load,
                        Instruction RegRegInst, Instruction RegImmInst, ComplexPattern AddrCP> {
Index: llvm/test/CodeGen/AArch64/aarch64-sve-ldst.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/aarch64-sve-ldst.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
+
+; Loads and stores of SVE predicate types should select the predicate LDR/STR.
+
+;;; LOAD
+
+define <vscale x 16 x i1> @sve_load16(<vscale x 16 x i1>* %base) {
+; CHECK-LABEL: sve_load16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %src = load <vscale x 16 x i1>, <vscale x 16 x i1>* %base, align 1
+  ret <vscale x 16 x i1> %src
+}
+
+define <vscale x 8 x i1> @sve_load8(<vscale x 8 x i1>* %base) {
+; CHECK-LABEL: sve_load8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %src = load <vscale x 8 x i1>, <vscale x 8 x i1>* %base, align 1
+  ret <vscale x 8 x i1> %src
+}
+
+define <vscale x 4 x i1> @sve_load4(<vscale x 4 x i1>* %base) {
+; CHECK-LABEL: sve_load4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %src = load <vscale x 4 x i1>, <vscale x 4 x i1>* %base, align 1
+  ret <vscale x 4 x i1> %src
+}
+
+define <vscale x 2 x i1> @sve_load2(<vscale x 2 x i1>* %base) {
+; CHECK-LABEL: sve_load2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %src = load <vscale x 2 x i1>, <vscale x 2 x i1>* %base, align 1
+  ret <vscale x 2 x i1> %src
+}
+
+;;; STORE
+
+define void @sve_store16(<vscale x 16 x i1>* %base, <vscale x 16 x i1> %src) {
+; CHECK-LABEL: sve_store16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  store <vscale x 16 x i1> %src, <vscale x 16 x i1>* %base, align 1
+  ret void
+}
+
+define void @sve_store8(<vscale x 8 x i1>* %base, <vscale x 8 x i1> %src) {
+; CHECK-LABEL: sve_store8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  store <vscale x 8 x i1> %src, <vscale x 8 x i1>* %base, align 1
+  ret void
+}
+
+define void @sve_store4(<vscale x 4 x i1>* %base, <vscale x 4 x i1> %src) {
+; CHECK-LABEL: sve_store4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  store <vscale x 4 x i1> %src, <vscale x 4 x i1>* %base, align 1
+  ret void
+}
+
+define void @sve_store2(<vscale x 2 x i1>* %base, <vscale x 2 x i1> %src) {
+; CHECK-LABEL: sve_store2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  store <vscale x 2 x i1> %src, <vscale x 2 x i1>* %base, align 1
+  ret void
+}
+