Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2371,6 +2371,16 @@
     return DAG.getVScale(DL, VT, C0 + C1);
   }
 
+  // fold a+vscale(c1)+vscale(c2) -> a+vscale(c1+c2)
+  if ((N0.getOpcode() == ISD::ADD) &&
+      (N0.getOperand(1).getOpcode() == ISD::VSCALE) &&
+      (N1.getOpcode() == ISD::VSCALE)) {
+    auto VS0 = N0.getOperand(1)->getConstantOperandAPInt(0);
+    auto VS1 = N1->getConstantOperandAPInt(0);
+    auto VS = DAG.getVScale(DL, VT, VS0 + VS1);
+    return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), VS);
+  }
+
   return SDValue();
 }
 
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1538,10 +1538,24 @@
                    MMOFlags, AAInfo);
 
   unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
-  Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
+
+  SDValue BytesIncrement;
+  MachinePointerInfo MPI;
+  if (LoVT.isScalableVector()) {
+    BytesIncrement = DAG.getVScale(
+        dl, Ptr.getValueType(),
+        APInt(Ptr.getValueType().getSizeInBits().getKnownMinSize(),
+              IncrementSize));
+    MPI = LD->getPointerInfo();
+    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, BytesIncrement);
+  } else {
+    BytesIncrement = DAG.getConstant(IncrementSize, dl, Ptr.getValueType());
+    MPI = LD->getPointerInfo().getWithOffset(IncrementSize);
+    Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
+  }
+
   Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
-                   LD->getPointerInfo().getWithOffset(IncrementSize), HiMemVT,
-                   LD->getOriginalAlign(), MMOFlags, AAInfo);
+                   MPI, HiMemVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
 
   // Build a factor node to remember that this load is independent of the
   // other one.
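(Illustrative aside, not part of the patch: the two hunks above cooperate.
Splitting a scalable load makes the high half's address
base + vscale(IncrementSize), and repeated splitting chains such adds; the
new combine collapses each chain into a single VSCALE offset. For the third
quarter of a twice-split nxv32i16 load, the DAG roughly evolves as follows,
with t0..t3 as schematic node names:

  ; before the combine:
  ;   t0 = vscale 32
  ;   t1 = add %base, t0
  ;   t2 = vscale 16
  ;   t3 = add t1, t2            ; (%base + vscale(32)) + vscale(16)
  ; after the combine:
  ;   t3 = add %base, (vscale 48)

i.e. a single base + vscale * C address, which the AArch64 patterns in the
next hunk can select as the [x0, #3, mul vl] addressing form.)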
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1675,8 +1675,15 @@
 multiclass unpred_load<PatFrag Load, ValueType Ty, Instruction RegImmInst,
                        Instruction PTrue> {
-  def : Pat<(Ty (Load (am_sve_fi GPR64sp:$base, simm4s1:$offset))),
-            (RegImmInst (PTrue 31), GPR64sp:$base, simm4s1:$offset)>;
+  let AddedComplexity = 1 in {
+    def _imm : Pat<(Ty (Load (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset))),
+                   (RegImmInst (PTrue 31), GPR64sp:$base, simm4s1:$offset)>;
+  }
+
+  let AddedComplexity = 2 in {
+    def _fi : Pat<(Ty (Load (am_sve_fi GPR64sp:$base, simm4s1:$offset))),
+                  (RegImmInst (PTrue 31), GPR64sp:$base, simm4s1:$offset)>;
+  }
 
   def : Pat<(Ty (Load GPR64:$base)),
             (RegImmInst (PTrue 31), GPR64:$base, (i64 0))>;
 
Index: llvm/test/CodeGen/AArch64/sve-ldst.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-ldst.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; LOAD
+
+define <vscale x 4 x i8> @load_promote_4i8(<vscale x 4 x i8>* %a) {
+; CHECK-LABEL: load_promote_4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %load = load <vscale x 4 x i8>, <vscale x 4 x i8>* %a
+  ret <vscale x 4 x i8> %load
+}
+
+define <vscale x 16 x i16> @load_split_i16(<vscale x 16 x i16>* %a) {
+; CHECK-LABEL: load_split_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ret
+  %load = load <vscale x 16 x i16>, <vscale x 16 x i16>* %a
+  ret <vscale x 16 x i16> %load
+}
+
+define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
+; CHECK-LABEL: load_split_32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ret
+  %load = load <vscale x 32 x i16>, <vscale x 32 x i16>* %a
+  ret <vscale x 32 x i16> %load
+}
+
+define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
+; CHECK-LABEL: load_split_16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ret
+  %load = load <vscale x 16 x i64>, <vscale x 16 x i64>* %a
+  ret <vscale x 16 x i64> %load
+}
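(The CHECK lines above were generated by the script named in the test's NOTE
line; after touching the patterns they can be regenerated with something like
the following, assuming a freshly built llc is first on PATH:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/sve-ldst.ll

Each "#N, mul vl" offset advances the address by N whole SVE registers, so
the split loads above pick up parts 1..N of the original wide vector.)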