Index: lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -41,7 +41,7 @@
 STATISTIC(NumPreFolded, "Number of pre-index updates folded");
 STATISTIC(NumUnscaledPairCreated,
           "Number of load/store from unscaled generated");
-STATISTIC(NumSmallTypeMerged, "Number of small type loads merged");
+STATISTIC(NumNarrowLdPromotion, "Number of narrow load promotions");
 
 static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                    cl::init(20), cl::Hidden);
@@ -78,13 +78,14 @@
 struct AArch64LoadStoreOpt : public MachineFunctionPass {
   static char ID;
-  AArch64LoadStoreOpt() : MachineFunctionPass(ID), IsStrictAlign(false) {
+  AArch64LoadStoreOpt()
+      : MachineFunctionPass(ID), IsNarrowLdOptProfitable(false) {
     initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
   }
   const AArch64InstrInfo *TII;
   const TargetRegisterInfo *TRI;
-  bool IsStrictAlign;
+  bool IsNarrowLdOptProfitable;
 
   // Scan the instructions looking for a load/store that can be combined
   // with the current instruction into a load/store pair.
@@ -127,6 +128,10 @@
   // Find and merge foldable ldr/str instructions.
   bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);
 
+  // Check if converting two narrow loads into a single wider load with
+  // bitfield extracts is profitable.
+  void enableNarrowLdPromotion(MachineFunction &Fn);
+
   bool optimizeBlock(MachineBasicBlock &MBB);
 
   bool runOnMachineFunction(MachineFunction &Fn) override;
@@ -157,6 +162,9 @@
   case AArch64::LDURXi:
   case AArch64::LDURSWi:
   case AArch64::LDURHHi:
+  case AArch64::LDURBBi:
+  case AArch64::LDURSBWi:
+  case AArch64::LDURSHWi:
     return true;
   }
 }
@@ -165,19 +173,40 @@
   return isUnscaledLdSt(MI->getOpcode());
 }
 
-static bool isSmallTypeLdMerge(unsigned Opc) {
+static unsigned getBitExtrOpcode(MachineInstr *MI) {
+  switch (MI->getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected opcode.");
+  case AArch64::LDRBBui:
+  case AArch64::LDURBBi:
+  case AArch64::LDRHHui:
+  case AArch64::LDURHHi:
+    return AArch64::UBFMWri;
+  case AArch64::LDRSBWui:
+  case AArch64::LDURSBWi:
+  case AArch64::LDRSHWui:
+  case AArch64::LDURSHWi:
+    return AArch64::SBFMWri;
+  }
+}
+
+static bool isNarrowLd(unsigned Opc) {
   switch (Opc) {
   default:
     return false;
   case AArch64::LDRHHui:
   case AArch64::LDURHHi:
+  case AArch64::LDRBBui:
+  case AArch64::LDURBBi:
+  case AArch64::LDRSHWui:
+  case AArch64::LDURSHWi:
+  case AArch64::LDRSBWui:
+  case AArch64::LDURSBWi:
     return true;
-  // FIXME: Add other instructions (e.g, LDRBBui, LDURSHWi, LDRSHWui, etc.).
   }
 }
 
-static bool isSmallTypeLdMerge(MachineInstr *MI) {
-  return isSmallTypeLdMerge(MI->getOpcode());
-}
+
+static bool isNarrowLd(MachineInstr *MI) { return isNarrowLd(MI->getOpcode()); }
 
 // Scaling factor for unscaled load or store.
 static int getMemScale(MachineInstr *MI) {
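A note on getBitExtrOpcode: UBFM and SBFM are the generic AArch64 bitfield-move
instructions behind LSR, ASR, UBFX, SBFX, SXTB, and SXTH, so choosing UBFMWri
for the zero-extending narrow loads and SBFMWri for the sign-extending ones
covers every extract the merge needs. A minimal C++ emulation of the 32-bit
forms, valid only for imms >= immr (the bitfield-extract shape this patch
emits); the helper names below are mine, not LLVM's:

    #include <cstdint>

    // UBFM Wd, Wn, #immr, #imms with imms >= immr extracts bits [imms:immr]
    // of Wn down to bit 0 and zero-extends: immr=16, imms=31 is LSR #16.
    static uint32_t ubfm32(uint32_t Wn, unsigned Immr, unsigned Imms) {
      return (Wn << (31 - Imms)) >> (31 - Imms + Immr);
    }

    // SBFM performs the same extract but sign-extends (this sketch assumes
    // the usual arithmetic right shift on signed values): immr=0, imms=7 is
    // SXTB; immr=8, imms=15 is SBFX #8, #8; immr=16, imms=31 is ASR #16.
    static int32_t sbfm32(uint32_t Wn, unsigned Immr, unsigned Imms) {
      return (int32_t)(Wn << (31 - Imms)) >> (31 - Imms + Immr);
    }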
@@ -185,10 +214,15 @@
   default:
     llvm_unreachable("Opcode has unknown scale!");
   case AArch64::LDRBBui:
+  case AArch64::LDURBBi:
+  case AArch64::LDRSBWui:
+  case AArch64::LDURSBWi:
   case AArch64::STRBBui:
     return 1;
   case AArch64::LDRHHui:
   case AArch64::LDURHHi:
+  case AArch64::LDRSHWui:
+  case AArch64::LDURSHWi:
   case AArch64::STRHHui:
     return 2;
   case AArch64::LDRSui:
@@ -261,11 +295,21 @@
   case AArch64::LDURSi:
   case AArch64::LDRHHui:
   case AArch64::LDURHHi:
+  case AArch64::LDRBBui:
+  case AArch64::LDURBBi:
     return Opc;
   case AArch64::LDRSWui:
     return AArch64::LDRWui;
   case AArch64::LDURSWi:
     return AArch64::LDURWi;
+  case AArch64::LDRSBWui:
+    return AArch64::LDRBBui;
+  case AArch64::LDRSHWui:
+    return AArch64::LDRHHui;
+  case AArch64::LDURSBWi:
+    return AArch64::LDURBBi;
+  case AArch64::LDURSHWi:
+    return AArch64::LDURHHi;
   }
 }
 
@@ -307,9 +351,17 @@
   case AArch64::LDURSWi:
     return AArch64::LDPSWi;
   case AArch64::LDRHHui:
+  case AArch64::LDRSHWui:
     return AArch64::LDRWui;
   case AArch64::LDURHHi:
+  case AArch64::LDURSHWi:
     return AArch64::LDURWi;
+  case AArch64::LDRBBui:
+  case AArch64::LDRSBWui:
+    return AArch64::LDRHHui;
+  case AArch64::LDURBBi:
+  case AArch64::LDURSBWi:
+    return AArch64::LDURHHi;
   }
 }
 
@@ -529,14 +581,12 @@
   int OffsetImm = getLdStOffsetOp(RtMI).getImm();
 
-  if (isSmallTypeLdMerge(Opc)) {
+  if (isNarrowLd(Opc)) {
     // Change the scaled offset from small to large type.
     if (!IsUnscaled)
       OffsetImm /= 2;
     MachineInstr *RtNewDest = MergeForward ? I : Paired;
     // Construct the new load instruction.
-    // FIXME: currently we support only halfword unsigned load. We need to
-    // handle byte type, signed, and store instructions as well.
     MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
     NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                        TII->get(NewOpc))
@@ -556,35 +606,61 @@
     DEBUG(dbgs() << "    with instructions:\n    ");
     DEBUG((NewMemMI)->print(dbgs()));
 
+    int immsHigh = getMemScale(I) == 1 ? 15 : 31;
+    int immrHigh = getMemScale(I) == 1 ? 8 : 16;
+    int immsLow = getMemScale(I) == 1 ? 7 : 15;
+
     MachineInstr *ExtDestMI = MergeForward ? Paired : I;
     if (ExtDestMI == Rt2MI) {
       // Create the bitfield extract for high half.
       BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
-                          TII->get(AArch64::UBFMWri))
+                          TII->get(getBitExtrOpcode(Rt2MI)))
                       .addOperand(getLdStRegOp(Rt2MI))
                       .addReg(getLdStRegOp(RtNewDest).getReg())
-                      .addImm(16)
-                      .addImm(31);
+                      .addImm(immrHigh)
+                      .addImm(immsHigh);
+
       // Create the bitfield extract for low half.
-      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
-                          TII->get(AArch64::ANDWri))
-                      .addOperand(getLdStRegOp(RtMI))
-                      .addReg(getLdStRegOp(RtNewDest).getReg())
-                      .addImm(15);
+      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
+        // For unsigned, prefer to use AND for low bits.
+        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                            TII->get(AArch64::ANDWri))
+                        .addOperand(getLdStRegOp(RtMI))
+                        .addReg(getLdStRegOp(RtNewDest).getReg())
+                        .addImm(immsLow);
+      } else {
+        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                            TII->get(getBitExtrOpcode(RtMI)))
+                        .addOperand(getLdStRegOp(RtMI))
+                        .addReg(getLdStRegOp(RtNewDest).getReg())
+                        .addImm(0)
+                        .addImm(immsLow);
+      }
     } else {
       // Create the bitfield extract for low half.
-      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
-                          TII->get(AArch64::ANDWri))
-                      .addOperand(getLdStRegOp(RtMI))
-                      .addReg(getLdStRegOp(RtNewDest).getReg())
-                      .addImm(15);
+      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
+        // For unsigned, prefer to use AND for low bits.
+        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                            TII->get(AArch64::ANDWri))
+                        .addOperand(getLdStRegOp(RtMI))
+                        .addReg(getLdStRegOp(RtNewDest).getReg())
+                        .addImm(immsLow);
+      } else {
+        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                            TII->get(getBitExtrOpcode(RtMI)))
+                        .addOperand(getLdStRegOp(RtMI))
+                        .addReg(getLdStRegOp(RtNewDest).getReg())
+                        .addImm(0)
+                        .addImm(immsLow);
+      }
+
       // Create the bitfield extract for high half.
       BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
-                          TII->get(AArch64::UBFMWri))
+                          TII->get(getBitExtrOpcode(Rt2MI)))
                       .addOperand(getLdStRegOp(Rt2MI))
                       .addReg(getLdStRegOp(RtNewDest).getReg())
-                      .addImm(16)
-                      .addImm(31);
+                      .addImm(immrHigh)
+                      .addImm(immsHigh);
     }
     DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
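The immsHigh/immrHigh/immsLow values above depend only on the narrow load's
width: getMemScale(I) is 1 when two byte loads are promoted to a halfword
load and 2 when two halfword loads are promoted to a word load. A hypothetical
summary of the resulting extracts (the struct and helper below are
illustrative, not part of the patch):

    // scale 1 (two byte loads -> one ldrh of value W):
    //   high half: UBFM/SBFM W, #8, #15   i.e. ubfx/sbfx #8, #8
    //   low  half: AND W, #0xff (unsigned) or SBFM W, #0, #7 i.e. sxtb
    // scale 2 (two halfword loads -> one ldr of value W):
    //   high half: UBFM/SBFM W, #16, #31  i.e. lsr/asr #16
    //   low  half: AND W, #0xffff (unsigned) or SBFM W, #0, #15 i.e. sxth
    struct BitExtractImms { int ImmrHigh, ImmsHigh, ImmsLow; };

    static BitExtractImms getExtractImms(int MemScale) {
      return MemScale == 1 ? BitExtractImms{8, 15, 7}
                           : BitExtractImms{16, 31, 15};
    }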
@@ -757,8 +833,7 @@
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
   int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
-  if (!isSmallTypeLdMerge(Opc) &&
-      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
+  if (!isNarrowLd(Opc) && !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
     return E;
 
   // Track which registers have been modified and used between the first insn
@@ -817,15 +892,15 @@
       // If the resultant immediate offset of merging these instructions
       // is out of range for a pairwise instruction, bail and keep looking.
       bool MIIsUnscaled = isUnscaledLdSt(MI);
-      bool IsSmallTypeLd = isSmallTypeLdMerge(MI->getOpcode());
-      if (!IsSmallTypeLd &&
+      bool IsNarrowLd = isNarrowLd(MI->getOpcode());
+      if (!IsNarrowLd &&
           !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
         trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
         MemInsns.push_back(MI);
         continue;
       }
 
-      if (IsSmallTypeLd) {
+      if (IsNarrowLd) {
         // If the alignment requirements of the larger type scaled load
         // instruction can't express the scaled offset of the smaller type
         // input, bail and keep looking.
@@ -1144,8 +1219,8 @@
   LdStPairFlags Flags;
   MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
   if (Paired != E) {
-    if (isSmallTypeLdMerge(MI)) {
-      ++NumSmallTypeMerged;
+    if (isNarrowLd(MI)) {
+      ++NumNarrowLdPromotion;
     } else {
       ++NumPairCreated;
       if (isUnscaledLdSt(MI))
@@ -1164,7 +1239,7 @@
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
   bool Modified = false;
   // Three transformations to do here:
-  // 1) Find halfword loads that can be merged into a single 32-bit word load
+  // 1) Find narrow loads that can be converted into a single wider load
   //    with bitfield extract instructions.
   //    e.g.,
   //      ldrh w0, [x2]
@@ -1189,7 +1264,7 @@
   //        ldr x0, [x2], #4
 
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
-       !IsStrictAlign && MBBI != E;) {
+       IsNarrowLdOptProfitable && MBBI != E;) {
     MachineInstr *MI = MBBI;
     switch (MI->getOpcode()) {
     default:
@@ -1198,8 +1273,14 @@
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRHHui:
+    case AArch64::LDRBBui:
+    case AArch64::LDRSBWui:
+    case AArch64::LDRSHWui:
    // Unscaled instructions.
-    case AArch64::LDURHHi: {
+    case AArch64::LDURHHi:
+    case AArch64::LDURBBi:
+    case AArch64::LDURSBWi:
+    case AArch64::LDURSHWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
@@ -1372,11 +1453,23 @@
   return Modified;
 }
 
+void AArch64LoadStoreOpt::enableNarrowLdPromotion(MachineFunction &Fn) {
+  const AArch64Subtarget *SubTarget =
+      &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
+  bool ProfitableArch = SubTarget->isCortexA57();
+  // FIXME: The benefit from converting narrow loads into a wider load could
+  // be microarchitectural as it assumes that a single load with two bitfield
+  // extracts is cheaper than two narrow loads. Currently, this conversion is
+  // enabled only on Cortex-A57, where the performance benefit was verified.
+  IsNarrowLdOptProfitable =
+      ProfitableArch && !SubTarget->requiresStrictAlign();
+}
+
 bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
   TII = static_cast<const AArch64InstrInfo *>(Fn.getSubtarget().getInstrInfo());
   TRI = Fn.getSubtarget().getRegisterInfo();
-  IsStrictAlign = (static_cast<const AArch64Subtarget &>(Fn.getSubtarget()))
-                      .requiresStrictAlign();
+
+  enableNarrowLdPromotion(Fn);
 
   bool Modified = false;
   for (auto &MBB : Fn)
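Before the new tests, a source-level picture of what the extended pass now
produces for the signed-byte case (a sketch: the C++ function and register
numbers are illustrative, but the instruction pattern matches what the
Ldrsb_merge test below checks):

    // Compiled for cortex-a57 (little-endian), the two adjacent ldrsb loads
    // become one ldrh plus two signed bitfield extracts:
    //   ldrh  w8, [x0]         // one halfword load covers p[0] and p[1]
    //   sxtb  w9, w8           // SBFM #0, #7  -> low byte, sign-extended
    //   sbfx  w8, w8, #8, #8   // SBFM #8, #15 -> high byte, sign-extended
    //   add   w0, w8, w9
    int sum2(signed char *p) { return p[0] + p[1]; }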
Index: test/CodeGen/AArch64/arm64-ldp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ldp.ll
+++ test/CodeGen/AArch64/arm64-ldp.ll
@@ -355,52 +355,3 @@
   %add = add nsw i64 %sexttmp1, %sexttmp
   ret i64 %add
 }
-
-; CHECK-LABEL: Ldrh_merge
-; CHECK-NOT: ldrh
-; CHECK: ldr [[NEW_DEST:w[0-9]+]]
-; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xffff
-; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]]
-
-define i16 @Ldrh_merge(i16* nocapture readonly %p) {
-  %1 = load i16, i16* %p, align 2
-  ;%conv = zext i16 %0 to i32
-  %arrayidx2 = getelementptr inbounds i16, i16* %p, i64 1
-  %2 = load i16, i16* %arrayidx2, align 2
-  %add = add nuw nsw i16 %1, %2
-  ret i16 %add
-}
-
-; CHECK-LABEL: Ldurh_merge
-; CHECK-NOT: ldurh
-; CHECK: ldur [[NEW_DEST:w[0-9]+]]
-; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xffff
-; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]]
-define i16 @Ldurh_merge(i16* nocapture readonly %p) {
-entry:
-  %arrayidx = getelementptr inbounds i16, i16* %p, i64 -2
-  %0 = load i16, i16* %arrayidx
-  %arrayidx3 = getelementptr inbounds i16, i16* %p, i64 -1
-  %1 = load i16, i16* %arrayidx3
-  %add = add nuw nsw i16 %0, %1
-  ret i16 %add
-}
-
-; CHECK-LABEL: Ldrh_4_merge
-; CHECK-NOT: ldrh
-; CHECK: ldp [[NEW_DEST:w[0-9]+]]
-define i16 @Ldrh_4_merge(i16* nocapture readonly %P) {
-  %arrayidx = getelementptr inbounds i16, i16* %P, i64 0
-  %l0 = load i16, i16* %arrayidx
-  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 1
-  %l1 = load i16, i16* %arrayidx2
-  %arrayidx7 = getelementptr inbounds i16, i16* %P, i64 2
-  %l2 = load i16, i16* %arrayidx7
-  %arrayidx12 = getelementptr inbounds i16, i16* %P, i64 3
-  %l3 = load i16, i16* %arrayidx12
-  %add4 = add nuw nsw i16 %l1, %l0
-  %add9 = add nuw nsw i16 %add4, %l2
-  %add14 = add nuw nsw i16 %add9, %l3
-
-  ret i16 %add14
-}
Index: test/CodeGen/AArch64/arm64-ldr-merge.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/arm64-ldr-merge.ll
@@ -0,0 +1,259 @@
+; RUN: llc < %s -march=arm64 -mcpu=cortex-a57 -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: Ldrh_merge
+; CHECK-NOT: ldrh
+; CHECK: ldr [[NEW_DEST:w[0-9]+]]
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xffff
+; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]]
+
+define i16 @Ldrh_merge(i16* nocapture readonly %p) {
+  %1 = load i16, i16* %p, align 2
+  %arrayidx2 = getelementptr inbounds i16, i16* %p, i64 1
+  %2 = load i16, i16* %arrayidx2, align 2
+  %add = add nuw nsw i16 %1, %2
+  ret i16 %add
+}
+
+; CHECK-LABEL: Ldurh_merge
+; CHECK-NOT: ldurh
+; CHECK: ldur [[NEW_DEST:w[0-9]+]]
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xffff
+; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]]
+define i16 @Ldurh_merge(i16* nocapture readonly %p) {
+  %arrayidx = getelementptr inbounds i16, i16* %p, i64 -2
+  %l0 = load i16, i16* %arrayidx
+  %arrayidx3 = getelementptr inbounds i16, i16* %p, i64 -1
+  %l1 = load i16, i16* %arrayidx3
+  %add = add nuw nsw i16 %l0, %l1
+  ret i16 %add
+}
+
+; CHECK-LABEL: Ldrb_merge
+; CHECK-NOT: ldrb
+; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xff
+; CHECK: ubfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+define i8 @Ldrb_merge(i8* nocapture readonly %P) {
+  %arrayidx = getelementptr inbounds i8, i8* %P, i64 0
+  %l0 = load i8, i8* %arrayidx
+  %arrayidx2 = getelementptr inbounds i8, i8* %P, i64 1
+  %l1 = load i8, i8* %arrayidx2
+  %add = add nuw nsw i8 %l0, %l1
+  ret i8 %add
+}
+
+; CHECK-LABEL: Ldurb_merge
+; CHECK-NOT: ldurb
+; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xff
+; CHECK: ubfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+define i8 @Ldurb_merge(i8* nocapture readonly %P) {
+  %arrayidx = getelementptr inbounds i8, i8* %P, i64 -2
+  %l0 = load i8, i8* %arrayidx
+  %arrayidx3 = getelementptr inbounds i8, i8* %P, i64 -1
+  %l1 = load i8, i8* %arrayidx3
+  %add = add nuw nsw i8 %l0, %l1
+  ret i8 %add
+}
+
+; CHECK-LABEL: Ldrh_4_merge
+; CHECK-NOT: ldrh
+; CHECK: ldp [[NEW_DEST:w[0-9]+]]
+define i16 @Ldrh_4_merge(i16* nocapture readonly %P) {
+  %arrayidx = getelementptr inbounds i16, i16* %P, i64 0
+  %l0 = load i16, i16* %arrayidx
+  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 1
+  %l1 = load i16, i16* %arrayidx2
+  %arrayidx7 = getelementptr inbounds i16, i16* %P, i64 2
+  %l2 = load i16, i16* %arrayidx7
+  %arrayidx12 = getelementptr inbounds i16, i16* %P, i64 3
+  %l3 = load i16, i16* %arrayidx12
+  %add4 = add nuw nsw i16 %l1, %l0
+  %add9 = add nuw nsw i16 %add4, %l2
+  %add14 = add nuw nsw i16 %add9, %l3
+
+  ret i16 %add14
+}
+
+; CHECK-LABEL: Ldrsh_merge
+; CHECK-NOT: ldrsh
+; CHECK: ldr [[NEW_DEST:w[0-9]+]]
+; CHECK: asr w{{[0-9]+}}, [[NEW_DEST]], #16
+; CHECK: sxth w{{[0-9]+}}, [[NEW_DEST]]
+define i32 @Ldrsh_merge(i16* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 5
+  %tmp = load i16, i16* %add.ptr0
+  %add.ptr = getelementptr inbounds i16, i16* %p, i64 4
+  %tmp1 = load i16, i16* %add.ptr
+  %sexttmp = sext i16 %tmp to i32
+  %sexttmp1 = sext i16 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+
+; CHECK-LABEL: Ldrsh_zsext_merge
+; CHECK: ldr [[NEW_DEST:w[0-9]+]]
+; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]], #16
+; CHECK: sxth w{{[0-9]+}}, [[NEW_DEST]]
+define i32 @Ldrsh_zsext_merge(i16* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 5
+  %tmp = load i16, i16* %add.ptr0
+  %add.ptr = getelementptr inbounds i16, i16* %p, i64 4
+  %tmp1 = load i16, i16* %add.ptr
+  %sexttmp = zext i16 %tmp to i32
+  %sexttmp1 = sext i16 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldrsh_szext_merge
+; CHECK: ldr [[NEW_DEST:w[0-9]+]]
+; CHECK: asr w{{[0-9]+}}, [[NEW_DEST]], #16
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xffff
+define i32 @Ldrsh_szext_merge(i16* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 5
+  %tmp = load i16, i16* %add.ptr0
+  %add.ptr = getelementptr inbounds i16, i16* %p, i64 4
+  %tmp1 = load i16, i16* %add.ptr
+  %sexttmp = sext i16 %tmp to i32
+  %sexttmp1 = zext i16 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldrsb_merge
+; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
+; CHECK: sxtb w{{[0-9]+}}, [[NEW_DEST]]
+; CHECK: sbfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+define i32 @Ldrsb_merge(i8* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
+  %tmp = load i8, i8* %add.ptr0
+  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
+  %tmp1 = load i8, i8* %add.ptr
+  %sexttmp = sext i8 %tmp to i32
+  %sexttmp1 = sext i8 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldrsb_zsext_merge
+; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xff
+; CHECK: sbfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+define i32 @Ldrsb_zsext_merge(i8* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
+  %tmp = load i8, i8* %add.ptr0
+  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
+  %tmp1 = load i8, i8* %add.ptr
+  %sexttmp = zext i8 %tmp to i32
+  %sexttmp1 = sext i8 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldrsb_szext_merge
+; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
+; CHECK: sxtb w{{[0-9]+}}, [[NEW_DEST]]
+; CHECK: ubfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+define i32 @Ldrsb_szext_merge(i8* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
+  %tmp = load i8, i8* %add.ptr0
+  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
+  %tmp1 = load i8, i8* %add.ptr
+  %sexttmp = sext i8 %tmp to i32
+  %sexttmp1 = zext i8 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldursh_merge
+; CHECK: ldur [[NEW_DEST:w[0-9]+]]
+; CHECK: asr w{{[0-9]+}}, [[NEW_DEST]], #16
+; CHECK: sxth w{{[0-9]+}}, [[NEW_DEST]]
+define i32 @Ldursh_merge(i16* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 -1
+  %tmp = load i16, i16* %add.ptr0
+  %add.ptr = getelementptr inbounds i16, i16* %p, i64 -2
+  %tmp1 = load i16, i16* %add.ptr
+  %sexttmp = sext i16 %tmp to i32
+  %sexttmp1 = sext i16 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+
+; CHECK-LABEL: Ldursh_zsext_merge
+; CHECK: ldur [[NEW_DEST:w[0-9]+]]
+; CHECK: lsr w{{[0-9]+}}, [[NEW_DEST]], #16
+; CHECK: sxth w{{[0-9]+}}, [[NEW_DEST]]
+define i32 @Ldursh_zsext_merge(i16* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 -1
+  %tmp = load i16, i16* %add.ptr0
+  %add.ptr = getelementptr inbounds i16, i16* %p, i64 -2
+  %tmp1 = load i16, i16* %add.ptr
+  %sexttmp = zext i16 %tmp to i32
+  %sexttmp1 = sext i16 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldursh_szext_merge
+; CHECK: ldur [[NEW_DEST:w[0-9]+]]
+; CHECK: asr w{{[0-9]+}}, [[NEW_DEST]], #16
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xffff
+define i32 @Ldursh_szext_merge(i16* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 -1
+  %tmp = load i16, i16* %add.ptr0
+  %add.ptr = getelementptr inbounds i16, i16* %p, i64 -2
+  %tmp1 = load i16, i16* %add.ptr
+  %sexttmp = sext i16 %tmp to i32
+  %sexttmp1 = zext i16 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldursb_merge
+; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
+; CHECK: sbfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+; CHECK: sxtb w{{[0-9]+}}, [[NEW_DEST]]
+define i32 @Ldursb_merge(i8* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
+  %tmp = load i8, i8* %add.ptr0
+  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
+  %tmp1 = load i8, i8* %add.ptr
+  %sexttmp = sext i8 %tmp to i32
+  %sexttmp1 = sext i8 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldursb_zsext_merge
+; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
+; CHECK: ubfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+; CHECK: sxtb w{{[0-9]+}}, [[NEW_DEST]]
+define i32 @Ldursb_zsext_merge(i8* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
+  %tmp = load i8, i8* %add.ptr0
+  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
+  %tmp1 = load i8, i8* %add.ptr
+  %sexttmp = zext i8 %tmp to i32
+  %sexttmp1 = sext i8 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
+
+; CHECK-LABEL: Ldursb_szext_merge
+; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
+; CHECK: sbfx w{{[0-9]+}}, [[NEW_DEST]], #8, #8
+; CHECK: and w{{[0-9]+}}, [[NEW_DEST]], #0xff
+define i32 @Ldursb_szext_merge(i8* %p) nounwind {
+  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
+  %tmp = load i8, i8* %add.ptr0
+  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
+  %tmp1 = load i8, i8* %add.ptr
+  %sexttmp = sext i8 %tmp to i32
+  %sexttmp1 = zext i8 %tmp1 to i32
+  %add = add nsw i32 %sexttmp1, %sexttmp
+  ret i32 %add
+}
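
Finally, note how Ldrh_4_merge composes the two phases of optimizeBlock:
phase 1 (narrow load promotion) turns each ldrh pair into a 32-bit ldr plus
bitfield extracts, and phase 2 (pairing) then merges the two 32-bit loads
into the ldp the test checks for. A source-level view of that pattern (an
illustrative function, not taken from the tests):

    // Four adjacent halfword loads: after phase 1, two 32-bit loads remain;
    // phase 2 merges those into a single ldp.
    unsigned sum4(unsigned short *p) {
      return p[0] + p[1] + p[2] + p[3];
    }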