Index: lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -151,6 +151,7 @@
   case AArch64::LDURWi:
   case AArch64::LDURXi:
   case AArch64::LDURSWi:
+  case AArch64::LDURHHi:
     return true;
   }
 }
@@ -159,6 +160,18 @@
   return isUnscaledLdSt(MI->getOpcode());
 }
 
+static bool isSmallTypeLdMerge(unsigned Opc) {
+  switch (Opc) {
+  default:
+    return false;
+  case AArch64::LDRHHui:
+    return true;
+  case AArch64::LDURHHi:
+    return true;
+  // FIXME: Add other instructions (e.g., LDRBBui, LDURSHWi, LDRSHWui, etc.).
+  }
+}
+
 // Scaling factor for unscaled load or store.
 static int getMemScale(MachineInstr *MI) {
   switch (MI->getOpcode()) {
@@ -168,6 +181,7 @@
   case AArch64::STRBBui:
     return 1;
   case AArch64::LDRHHui:
+  case AArch64::LDURHHi:
   case AArch64::STRHHui:
     return 2;
   case AArch64::LDRSui:
@@ -238,6 +252,8 @@
   case AArch64::STURSi:
   case AArch64::LDRSui:
   case AArch64::LDURSi:
+  case AArch64::LDRHHui:
+  case AArch64::LDURHHi:
     return Opc;
   case AArch64::LDRSWui:
     return AArch64::LDRWui;
@@ -283,6 +299,10 @@
   case AArch64::LDRSWui:
   case AArch64::LDURSWi:
     return AArch64::LDPSWi;
+  case AArch64::LDRHHui:
+    return AArch64::LDRWui;
+  case AArch64::LDURHHi:
+    return AArch64::LDURWi;
   }
 }
 
@@ -484,18 +504,76 @@
     RtMI = I;
     Rt2MI = Paired;
   }
+
+  int OffsetImm;
+  MachineInstrBuilder MIB;
+
+  if (isSmallTypeLdMerge(Opc)) {
+    OffsetImm = getLdStOffsetOp(RtMI).getImm();
+    // Change the scaled offset from the small type to the large type.
+    if (!IsUnscaled)
+      OffsetImm /= 2;
+    MachineInstr *RtNewDest = MergeForward ? I : Paired;
+    // Construct the new load instruction.
+    // FIXME: Currently we support only unsigned halfword loads. We need to
+    // handle byte, signed, and store instructions as well.
+    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
+              .addOperand(getLdStRegOp(RtNewDest))
+              .addOperand(BaseRegOp)
+              .addImm(OffsetImm);
+
+    DEBUG(dbgs()
+          << "Creating new load and extract. Replacing instructions:\n    ");
+    DEBUG(I->print(dbgs()));
+    DEBUG(dbgs() << "    ");
+    DEBUG(Paired->print(dbgs()));
+    DEBUG(dbgs() << "  with instructions:\n    ");
+    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+
+    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
+    int Immr = ExtDestMI == Rt2MI ? 16 : 0;
+    int Imms = ExtDestMI == Rt2MI ? 31 : 15;
+    // Create the first bitfield extract.
+    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                  TII->get(AArch64::UBFMWri))
+              .addOperand(getLdStRegOp(ExtDestMI))
+              .addReg(getLdStRegOp(RtNewDest).getReg())
+              .addImm(Immr)
+              .addImm(Imms);
+    DEBUG(dbgs() << "    ");
+    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+
+    ExtDestMI = MergeForward ? I : Paired;
+    Immr = ExtDestMI == RtMI ? 0 : 16;
+    Imms = ExtDestMI == RtMI ? 15 : 31;
+    // Create the second bitfield extract.
+    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                  TII->get(AArch64::UBFMWri))
+              .addOperand(getLdStRegOp(ExtDestMI))
+              .addReg(getLdStRegOp(RtNewDest).getReg())
+              .addImm(Immr)
+              .addImm(Imms);
+    DEBUG(dbgs() << "    ");
+    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+    DEBUG(dbgs() << "\n");
+
+    // Erase the old instructions.
+    I->eraseFromParent();
+    Paired->eraseFromParent();
+
+    return NextI;
+  }
 
   // Handle Unscaled
-  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
+  OffsetImm = getLdStOffsetOp(RtMI).getImm();
   if (IsUnscaled)
     OffsetImm /= OffsetStride;
-
   // Construct the new instruction.
-  MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
-                                    I->getDebugLoc(), TII->get(NewOpc))
-                                .addOperand(getLdStRegOp(RtMI))
-                                .addOperand(getLdStRegOp(Rt2MI))
-                                .addOperand(BaseRegOp)
-                                .addImm(OffsetImm);
+  MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                TII->get(NewOpc))
+            .addOperand(getLdStRegOp(RtMI))
+            .addOperand(getLdStRegOp(Rt2MI))
+            .addOperand(BaseRegOp)
+            .addImm(OffsetImm);
   (void)MIB;
 
   // FIXME: Do we need/want to copy the mem operands from the source
@@ -622,8 +700,7 @@
 /// be combined with the current instruction into a load/store pair.
 MachineBasicBlock::iterator
 AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
-                                      LdStPairFlags &Flags,
-                                      unsigned Limit) {
+                                      LdStPairFlags &Flags, unsigned Limit) {
   MachineBasicBlock::iterator E = I->getParent()->end();
   MachineBasicBlock::iterator MBBI = I;
   MachineInstr *FirstMI = I;
@@ -645,7 +722,8 @@
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
   int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
-  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
+  if (!isSmallTypeLdMerge(Opc) &&
+      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
     return E;
 
   // Track which registers have been modified and used between the first insn
@@ -704,18 +782,32 @@
         // If the resultant immediate offset of merging these instructions
         // is out of range for a pairwise instruction, bail and keep looking.
         bool MIIsUnscaled = isUnscaledLdSt(MI);
-        if (!inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
+        bool IsSmallTypeLd = isSmallTypeLdMerge(MI->getOpcode());
+        if (!IsSmallTypeLd &&
+            !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
           trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
           MemInsns.push_back(MI);
           continue;
         }
-        // If the alignment requirements of the paired (scaled) instruction
-        // can't express the offset of the unscaled input, bail and keep
-        // looking.
-        if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
-          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
-          MemInsns.push_back(MI);
-          continue;
+
+        if (IsSmallTypeLd) {
+          // If the alignment requirements of the larger-type scaled load
+          // instruction can't express the scaled offset of the smaller-type
+          // input, bail and keep looking.
+          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
+            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+            MemInsns.push_back(MI);
+            continue;
+          }
+        } else {
+          // If the alignment requirements of the paired (scaled) instruction
+          // can't express the offset of the unscaled input, bail and keep
+          // looking.
+          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
+            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+            MemInsns.push_back(MI);
+            continue;
+          }
         }
         // If the destination register of the loads is the same register, bail
         // and keep looking. A load-pair instruction with both destination
@@ -998,7 +1090,7 @@
 
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
   bool Modified = false;
-  // Two tranformations to do here:
+  // Three transformations to do here:
   // 1) Find loads and stores that can be merged into a single load or store
   //    pair instruction.
   //    e.g.,
@@ -1013,6 +1105,15 @@
   //        add x2, x2, #4
   //      ; becomes
   //        ldr x0, [x2], #4
+  // 3) Find halfword loads that can be merged into a single 32-bit word load
+  //    with bitfield extract instructions.
+  //    e.g.,
+  //        ldrh w0, [x2]
+  //        ldrh w1, [x2, #2]
+  //      ; becomes
+  //        ldr w0, [x2]
+  //        ubfx w1, w0, #16, #16
+  //        ubfx w0, w0, #0, #16
 
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
        MBBI != E;) {
@@ -1034,6 +1135,7 @@
     case AArch64::LDRXui:
     case AArch64::LDRWui:
     case AArch64::LDRSWui:
+    case AArch64::LDRHHui:
     // Unscaled instructions.
     case AArch64::STURSi:
     case AArch64::STURDi:
@@ -1045,7 +1147,8 @@
     case AArch64::LDURQi:
     case AArch64::LDURWi:
    case AArch64::LDURXi:
-    case AArch64::LDURSWi: {
+    case AArch64::LDURSWi:
+    case AArch64::LDURHHi: {
      // If this is a volatile load/store, don't mess with it.
      if (MI->hasOrderedMemoryRef()) {
        ++MBBI;
Index: test/CodeGen/AArch64/arm64-ldp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ldp.ll
+++ test/CodeGen/AArch64/arm64-ldp.ll
@@ -355,3 +355,32 @@
   %add = add nsw i64 %sexttmp1, %sexttmp
   ret i64 %add
 }
+
+; CHECK-LABEL: Ldrh_merge
+; CHECK-NOT: ldrh
+; CHECK: ldr [[NEW_DEST:w[0-9]+]]
+; CHECK: uxth w{{[0-9]+}}, [[NEW_DEST]]
+; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]]
+
+define i16 @Ldrh_merge(i16* nocapture readonly %p) {
+  %1 = load i16, i16* %p, align 2
+  %arrayidx2 = getelementptr inbounds i16, i16* %p, i64 1
+  %2 = load i16, i16* %arrayidx2, align 2
+  %add = add nuw nsw i16 %1, %2
+  ret i16 %add
+}
+
+; CHECK-LABEL: Ldurh_merge
+; CHECK-NOT: ldurh
+; CHECK: ldur [[NEW_DEST:w[0-9]+]]
+; CHECK: uxth w{{[0-9]+}}, [[NEW_DEST]]
+; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]]
+define i16 @Ldurh_merge(i16* nocapture readonly %p) {
+entry:
+  %arrayidx = getelementptr inbounds i16, i16* %p, i64 -2
+  %0 = load i16, i16* %arrayidx
+  %arrayidx3 = getelementptr inbounds i16, i16* %p, i64 -1
+  %1 = load i16, i16* %arrayidx3
+  %add = add nuw nsw i16 %0, %1
+  ret i16 %add
+}
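
Note for reviewers: the rewrite relies on a simple value-level identity. On a little-endian target such as AArch64, loading one 32-bit word that covers two adjacent halfwords and then extracting bits [15:0] and [31:16] yields exactly the values the two original halfword loads would have produced, and the pass only fires when the two halfwords are known adjacent, so the wider access touches no new memory. Below is a minimal host-side sketch of that identity; the function names and test data are illustrative only and are not part of the patch.

#include <cassert>
#include <cstdint>
#include <cstring>

// Two adjacent halfword loads, as emitted before this patch:
//   ldrh w0, [x2] ; ldrh w1, [x2, #2]
static void loadSeparate(const uint8_t *P, uint32_t &W0, uint32_t &W1) {
  uint16_t Lo, Hi;
  std::memcpy(&Lo, P, 2);
  std::memcpy(&Hi, P + 2, 2);
  W0 = Lo;
  W1 = Hi;
}

// One word load plus two bitfield extracts, as emitted after the merge:
//   ldr wN, [x2] ; ubfx w1, wN, #16, #16 ; ubfx w0, wN, #0, #16
static void loadMerged(const uint8_t *P, uint32_t &W0, uint32_t &W1) {
  uint32_t Word;
  std::memcpy(&Word, P, 4);
  W1 = Word >> 16;     // UBFMWri with Immr=16, Imms=31
  W0 = Word & 0xffffu; // UBFMWri with Immr=0, Imms=15
}

int main() {
  // Little-endian encoding of the halfwords 0x1234 and 0x5678.
  const uint8_t Buf[4] = {0x34, 0x12, 0x78, 0x56};
  uint32_t A0, A1, B0, B1;
  loadSeparate(Buf, A0, A1);
  loadMerged(Buf, B0, B1);
  assert(A0 == B0 && A1 == B1 && B0 == 0x1234 && B1 == 0x5678);
  return 0;
}

The alias printing also explains the CHECK lines in the test: UBFMWri with Immr=0/Imms=15 disassembles as uxth, and with Immr=16/Imms=31 as lsr, so the test matches those mnemonics rather than ubfx.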