Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1359,6 +1359,14 @@
   case AArch64::LDRQui:
   case AArch64::LDRXui:
   case AArch64::LDRWui:
+  case AArch64::LDRSWui:
+  // Unscaled instructions.
+  case AArch64::LDURSi:
+  case AArch64::LDURDi:
+  case AArch64::LDURQi:
+  case AArch64::LDURWi:
+  case AArch64::LDURXi:
+  case AArch64::LDURSWi:
     unsigned Width;
     return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
   };
@@ -1428,6 +1436,7 @@
     Scale = Width = 8;
     break;
   case AArch64::LDRWui:
+  case AArch64::LDRSWui:
   case AArch64::LDRSui:
   case AArch64::STRWui:
   case AArch64::STRSui:
@@ -1463,14 +1472,49 @@
     return false;
   if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
     return false;
-  // getMemOpBaseRegImmOfs guarantees that oper 2 isImm.
-  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
-  // Allow 6 bits of positive range.
-  if (Ofs1 > 64)
+
+  // getMemOpBaseRegImmOfs guarantees that operand 2 isImm.
+  int64_t Offset1 = FirstLdSt->getOperand(2).getImm();
+  int64_t Offset2 = SecondLdSt->getOperand(2).getImm();
+
+  // Scale the unscaled offsets.
+  if (isUnscaledLdSt(FirstLdSt)) {
+    unsigned OffsetStride = 1;
+    switch (FirstLdSt->getOpcode()) {
+    default:
+      return false;
+    case AArch64::LDURQi:
+      OffsetStride = 16;
+      break;
+    case AArch64::LDURXi:
+    case AArch64::LDURDi:
+      OffsetStride = 8;
+      break;
+    case AArch64::LDURWi:
+    case AArch64::LDURSi:
+    case AArch64::LDURSWi:
+      OffsetStride = 4;
+      break;
+    }
+    // If the byte-offset isn't a multiple of the stride, we can't pair these
+    // loads/stores.
+    if (Offset1 % OffsetStride != 0)
+      return false;
+    if (Offset2 % OffsetStride != 0)
+      return false;
+
+    // Convert the byte-offset used by unscaled into an "element" offset used
+    // by the scaled pair load/store instructions.
+    Offset1 /= OffsetStride;
+    Offset2 /= OffsetStride;
+  }
+  // Pairwise instructions have a 7-bit signed offset field.
+  if (Offset1 > 63 || Offset1 < -64)
     return false;
+
   // The caller should already have ordered First/SecondLdSt by offset.
-  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
-  return Ofs1 + 1 == Ofs2;
+  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
+  return Offset1 + 1 == Offset2;
 }
 
 bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
Index: test/CodeGen/AArch64/arm64-ldp-cluster.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -0,0 +1,29 @@
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+
+; Test ldpsw clustering
+; CHECK: ********** MI Scheduling **********
+; CHECK: ldp_sext_int:BB#0
+; CHECK: Cluster loads SU(1) - SU(2)
+define i64 @ldp_sext_int(i32* %p) nounwind {
+  %tmp = load i32, i32* %p, align 4
+  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
+  %tmp1 = load i32, i32* %add.ptr, align 4
+  %sexttmp = sext i32 %tmp to i64
+  %sexttmp1 = sext i32 %tmp1 to i64
+  %add = add nsw i64 %sexttmp1, %sexttmp
+  ret i64 %add
+}
+
+; Test ldur clustering.
+; CHECK: ********** MI Scheduling **********
+; CHECK: ldur_int:BB#0
+; CHECK: Cluster loads SU(2) - SU(1)
+define i32 @ldur_int(i32* %a) nounwind {
+  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
+  %tmp1 = load i32, i32* %p1, align 2
+  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
+  %tmp2 = load i32, i32* %p2, align 2
+  %tmp3 = add i32 %tmp1, %tmp2
+  ret i32 %tmp3
+}
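
For reviewers, a minimal standalone sketch (not part of the patch) of the pairing check the new shouldClusterLoads code performs for unscaled loads, assuming the same strides and the 7-bit signed element-offset range; the Opcode enum and helper names below are illustrative only, not LLVM API:

// Illustrative model of the added offset check; simplified, not LLVM code.
#include <cassert>
#include <cstdint>
#include <cstdio>

enum Opcode { LDURQi, LDURXi, LDURDi, LDURWi, LDURSi, LDURSWi };

// Byte stride implied by the access width, mirroring the switch in the patch.
static unsigned offsetStride(Opcode Opc) {
  switch (Opc) {
  case LDURQi:
    return 16;
  case LDURXi:
  case LDURDi:
    return 8;
  case LDURWi:
  case LDURSi:
  case LDURSWi:
    return 4;
  }
  return 1;
}

// Returns true if two unscaled loads at the given byte offsets could later be
// rewritten as a single pair (ldp-style) instruction.
static bool canPair(Opcode Opc, int64_t Offset1, int64_t Offset2) {
  unsigned Stride = offsetStride(Opc);
  // Byte offsets must be multiples of the access size.
  if (Offset1 % Stride != 0 || Offset2 % Stride != 0)
    return false;
  // Convert byte offsets to the "element" offsets used by the scaled pair form.
  Offset1 /= Stride;
  Offset2 /= Stride;
  // Pairwise instructions have a 7-bit signed offset field.
  if (Offset1 > 63 || Offset1 < -64)
    return false;
  // The elements must be adjacent, lower offset first.
  assert(Offset1 <= Offset2 && "caller orders offsets");
  return Offset1 + 1 == Offset2;
}

int main() {
  // Mirrors the ldur_int test: two 32-bit loads at byte offsets -8 and -4.
  printf("%d\n", canPair(LDURWi, -8, -4)); // 1: element offsets -2 and -1
  printf("%d\n", canPair(LDURWi, -8, -2)); // 0: -2 is not a multiple of 4
  return 0;
}

Compiled with any C++11 compiler, this prints 1 for the ldur_int pattern and 0 when an offset is not a multiple of the access size, which is the same reason the scheduler declines to cluster such loads.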