Index: llvm/include/llvm/CodeGen/TargetPassConfig.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetPassConfig.h
+++ llvm/include/llvm/CodeGen/TargetPassConfig.h
@@ -130,6 +130,9 @@
   /// Default setting for -enable-tail-merge on this target.
   bool EnableTailMerge = true;
 
+  /// Enable sinking copies of instructions in MachineSink.
+  bool EnableCopySink = false;
+
   /// Require processing of functions such that callees are generated before
   /// callers.
   bool RequireCodeGenSCCOrder = false;
@@ -176,6 +179,9 @@
   bool getEnableTailMerge() const { return EnableTailMerge; }
   void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
 
+  bool getEnableCopySink() const { return EnableCopySink; }
+  void setEnableCopySink(bool Enable) { setOpt(EnableCopySink, Enable); }
+
   bool requiresCodeGenSCCOrder() const { return RequireCodeGenSCCOrder; }
   void setRequiresCodeGenSCCOrder(bool Enable = true) {
     setOpt(RequireCodeGenSCCOrder, Enable);
Index: llvm/lib/CodeGen/MachineSink.cpp
===================================================================
--- llvm/lib/CodeGen/MachineSink.cpp
+++ llvm/lib/CodeGen/MachineSink.cpp
@@ -41,6 +41,7 @@
 #include "llvm/CodeGen/RegisterClassInfo.h"
 #include "llvm/CodeGen/RegisterPressure.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/IR/BasicBlock.h"
@@ -115,6 +116,7 @@
 namespace {
 
 class MachineSinking : public MachineFunctionPass {
+  const TargetSubtargetInfo *STI;
   const TargetInstrInfo *TII;
   const TargetRegisterInfo *TRI;
   MachineRegisterInfo *MRI; // Machine register information
@@ -165,7 +167,10 @@
       StoreInstrCache;
 
   /// Cached BB's register pressure.
-  std::map<MachineBasicBlock *, std::vector<unsigned>> CachedRegisterPressure;
+  std::map<const MachineBasicBlock *, std::vector<unsigned>> CachedRegisterPressure;
+
+  bool EnableCopySink;
+  DenseMap<MachineInstr *, bool> CopySinkCandidates;
 
 public:
   static char ID; // Pass identification
@@ -187,6 +192,7 @@
     AU.addPreserved<MachineLoopInfo>();
     if (UseBlockFreqInfo)
       AU.addRequired<MachineBlockFrequencyInfo>();
+    AU.addRequired<TargetPassConfig>();
   }
 
   void releaseMemory() override {
@@ -246,11 +252,18 @@
   bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                        MachineBasicBlock *MBB);
 
+  bool PerformCopySink(MachineInstr &MI, MachineBasicBlock *MBB);
+
   SmallVector<MachineBasicBlock *, 4> &
   GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                          AllSuccsCache &AllSuccessors) const;
 
-  std::vector<unsigned> &getBBRegisterPressure(MachineBasicBlock &MBB);
+  std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);
+
+  bool registerPressureSetExceedsLimit(unsigned NRegs, const TargetRegisterClass *RC,
+                                       const MachineBasicBlock &MBB);
+
+  bool isProfitableToSinkCopy(const MachineInstr &MI);
 };
 
 } // end anonymous namespace
@@ -300,6 +313,174 @@
   return true;
 }
 
+bool MachineSinking::isProfitableToSinkCopy(const MachineInstr &MI) {
+  // Bail out if the instruction is itself copy-like (possibly a copy from a
+  // physical register) or a REG_SEQUENCE, or if it is too expensive to
+  // profitably replace a copy with.
+  if (MI.isCopyLike() || MI.getOpcode() == TargetOpcode::REG_SEQUENCE ||
+      !TII->isAsCheapAsAMove(MI))
+    return false;
+
+  // Don't sink instructions that the target prefers not to sink.
+  if (!TII->shouldSink(MI))
+    return false;
+
+  // Check if it's safe to move the instruction.
+  bool SawStore = true;
+  if (!MI.isSafeToMove(AA, SawStore))
+    return false;
+
+  // Convergent operations may not be made control-dependent on additional
+  // values.
+  if (MI.isConvergent())
+    return false;
+
+  // Don't sink defs/uses of physical registers, or instructions that define
+  // more than one register.
+  // Don't sink more than two register uses - this covers most of the cases
+  // and greatly simplifies the register pressure checks.
+  Register DefReg;
+  Register UsedRegA, UsedRegB;
+  for (const MachineOperand &MO : MI.operands()) {
+    if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
+        MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
+        MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
+      continue;
+    if (!MO.isReg())
+      return false;
+
+    Register Reg = MO.getReg();
+    if (Reg == 0)
+      continue;
+
+    if (Reg.isVirtual()) {
+      if (MO.isDef()) {
+        if (DefReg)
+          return false;
+        DefReg = Reg;
+        continue;
+      }
+
+      if (UsedRegA == 0)
+        UsedRegA = Reg;
+      else if (UsedRegB == 0)
+        UsedRegB = Reg;
+      else
+        return false;
+      continue;
+    }
+
+    if (Reg.isPhysical() &&
+        (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
+      continue;
+
+    return false;
+  }
+
+  // Scan uses of the destination register. Every use must be a copy, with a
+  // chain of copies terminating in a physical register.
+  SmallVector<Register> Worklist;
+  Worklist.push_back(DefReg);
+
+  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
+  const TargetRegisterClass *RCA =
+      UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);
+  const TargetRegisterClass *RCB =
+      UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);
+
+  while (!Worklist.empty()) {
+    Register Reg = Worklist.pop_back_val();
+
+    for (const MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
+      const MachineInstr &UseInst = *MO.getParent();
+      if (!UseInst.isCopy())
+        return false;
+
+      Register DstReg = UseInst.getOperand(0).getReg();
+      if (DstReg.isVirtual()) {
+        Worklist.push_back(DstReg);
+        continue;
+      }
+
+      if (UseInst.getParent() == MI.getParent())
+        return false;
+
+      // The physical register must be in the register class of the original
+      // instruction's destination register.
+      if (!RC->contains(DstReg))
+        return false;
+
+      // If the register class of the source of the copy is a superset of any
+      // of the register classes of the operands of the materialized
+      // instruction, don't consider that live range extended.
+      const TargetRegisterClass *RCS =
+          MRI->getRegClass(UseInst.getOperand(1).getReg());
+      if (RCA && RCA->hasSuperClassEq(RCS))
+        RCA = nullptr;
+      else if (RCB && RCB->hasSuperClassEq(RCS))
+        RCB = nullptr;
+      if (RCA || RCB) {
+        if (RCA == nullptr) {
+          RCA = RCB;
+          RCB = nullptr;
+        }
+
+        unsigned NRegs = !!RCA + !!RCB;
+        if (RCA == RCB)
+          RCB = nullptr;
+
+        // Check that we don't exceed register pressure at the destination.
+        const MachineBasicBlock &MBB = *UseInst.getParent();
+        if (RCB == nullptr) {
+          if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))
+            return false;
+        } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||
+                   registerPressureSetExceedsLimit(1, RCB, MBB))
+          return false;
+      }
+    }
+  }
+
+  // Now we know we can fold the instruction into all of its users.
+  if (UsedRegA)
+    MRI->clearKillFlags(UsedRegA);
+  if (UsedRegB)
+    MRI->clearKillFlags(UsedRegB);
+
+  return true;
+}
+
+bool MachineSinking::PerformCopySink(MachineInstr &MI, MachineBasicBlock *MBB) {
+  if (!MI.isCopy())
+    return false;
+
+  Register SrcReg = MI.getOperand(1).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
+  if (!SrcReg.isVirtual() || !DstReg.isPhysical())
+    return false;
+
+  // Skip a chain of virtual register copies until we reach a non-copy
+  // instruction.
+  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+  while (DefMI->isCopy() &&
+         (SrcReg = DefMI->getOperand(1).getReg()).isVirtual()) {
+    DefMI = MRI->getVRegDef(SrcReg);
+  }
+
+  bool CanSink;
+  if (auto It = CopySinkCandidates.find(DefMI); It != CopySinkCandidates.end())
+    CanSink = It->second;
+  else
+    CopySinkCandidates[DefMI] = CanSink = isProfitableToSinkCopy(*DefMI);
+
+  if (!CanSink)
+    return false;
+
+  TII->reMaterialize(*MBB, MI.getIterator(), DstReg, 0, *DefMI, *TRI);
+  MI.eraseFromParent();
+
+  return true;
+}
+
 /// AllUsesDominatedByBlock - Return true if all uses of the specified register
 /// occur in blocks dominated by the specified block. If any use is in the
 /// definition block, then return false since it is never legal to move def
@@ -423,8 +604,9 @@
 
   LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");
 
-  TII = MF.getSubtarget().getInstrInfo();
-  TRI = MF.getSubtarget().getRegisterInfo();
+  STI = &MF.getSubtarget();
+  TII = STI->getInstrInfo();
+  TRI = STI->getRegisterInfo();
   MRI = &MF.getRegInfo();
   DT = &getAnalysis<MachineDominatorTree>();
   PDT = &getAnalysis<MachinePostDominatorTree>();
@@ -433,6 +615,8 @@
   MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
   RegClassInfo.runOnMachineFunction(MF);
+  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
+  EnableCopySink = PassConfig->getEnableCopySink();
 
   bool EverMadeChange = false;
@@ -509,8 +693,8 @@
 }
 
 bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
-  // Can't sink anything out of a block that has less than two successors.
-  if (MBB.succ_size() <= 1 || MBB.empty()) return false;
+  if ((!EnableCopySink && MBB.succ_size() <= 1) || MBB.empty())
+    return false;
 
   // Don't bother sinking code out of unreachable blocks. In addition to being
   // unprofitable, it can also lead to infinite looping, because in an
@@ -541,8 +725,39 @@
       continue;
     }
 
-    bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
-    if (Joined) {
+    if (EnableCopySink) {
+      if (MI.isCopy()) {
+        Register Reg = MI.getOperand(0).getReg();
+        if (Reg.isVirtual() && MRI->use_empty(Reg)) {
+          MI.eraseFromParent();
+          MadeChange = true;
+          continue;
+        }
+      }
+
+      if (auto It = CopySinkCandidates.find(&MI);
+          It != CopySinkCandidates.end() && It->second) {
+        Register Reg = MI.getOperand(0).getReg();
+        assert(Reg.isVirtual() && "Only virtual register defs can be copy sunk");
+        if (MRI->use_empty(Reg)) {
+          MI.eraseFromParent();
+          MadeChange = true;
+          continue;
+        }
+      }
+
+      if (PerformCopySink(MI, &MBB)) {
+        MadeChange = true;
+        continue;
+      }
+    }
+
+    // Can't sink anything out of a block that has fewer than two successors.
+    if (MBB.succ_size() <= 1)
+      continue;
+
+    if (PerformTrivialForwardCoalescing(MI, &MBB)) {
       MadeChange = true;
       continue;
     }
@@ -559,7 +774,7 @@
   SeenDbgVars.clear();
   // Recalculate the BB register pressure after sinking one BB.
   CachedRegisterPressure.clear();
-
+  CopySinkCandidates.clear();
   return MadeChange;
 }
@@ -701,7 +916,7 @@
 }
 
 std::vector<unsigned> &
-MachineSinking::getBBRegisterPressure(MachineBasicBlock &MBB) {
+MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB) {
   // Currently, to save compile time, MBB's register pressure will not change
   // in one ProcessBlock iteration because of CachedRegisterPressure. But MBB's
   // register pressure is changed after sinking any instructions into it.
@@ -717,10 +932,10 @@
   RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(),
                  /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true);
 
-  for (MachineBasicBlock::iterator MII = MBB.instr_end(),
+  for (MachineBasicBlock::const_iterator MII = MBB.instr_end(),
                                    MIE = MBB.instr_begin();
        MII != MIE; --MII) {
-    MachineInstr &MI = *std::prev(MII);
+    const MachineInstr &MI = *std::prev(MII);
     if (MI.isDebugInstr() || MI.isPseudoProbe())
       continue;
     RegisterOperands RegOpers;
@@ -736,6 +951,18 @@
   return It.first->second;
 }
 
+bool MachineSinking::registerPressureSetExceedsLimit(
+    unsigned NRegs, const TargetRegisterClass *RC, const MachineBasicBlock &MBB) {
+  unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;
+  const int *PS = TRI->getRegClassPressureSets(RC);
+  std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB);
+  for (; *PS != -1; PS++)
+    if (Weight + BBRegisterPressure[*PS] >=
+        TRI->getRegPressureSetLimit(*MBB.getParent(), *PS))
+      return true;
+  return false;
+}
+
 /// isProfitableToSinkTo - Return true if it is profitable to sink MI.
 bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                                           MachineBasicBlock *MBB,
@@ -780,21 +1007,6 @@
   if (!MCycle)
     return false;
 
-  auto isRegisterPressureSetExceedLimit = [&](const TargetRegisterClass *RC) {
-    unsigned Weight = TRI->getRegClassWeight(RC).RegWeight;
-    const int *PS = TRI->getRegClassPressureSets(RC);
-    // Get register pressure for block SuccToSinkTo.
-    std::vector<unsigned> BBRegisterPressure =
-        getBBRegisterPressure(*SuccToSinkTo);
-    for (; *PS != -1; PS++)
-      // check if any register pressure set exceeds limit in block SuccToSinkTo
-      // after sinking.
-      if (Weight + BBRegisterPressure[*PS] >=
-          TRI->getRegPressureSetLimit(*MBB->getParent(), *PS))
-        return true;
-    return false;
-  };
-
   // If this instruction is inside a cycle and sinking it can shorten the live
   // ranges of more registers, it is still profitable.
   for (const MachineOperand &MO : MI.operands()) {
@@ -836,7 +1048,7 @@
     // The DefMI is defined inside the cycle.
     // If sinking this operand makes some register pressure set exceed limit,
    // it is not profitable.
-    if (isRegisterPressureSetExceedLimit(MRI->getRegClass(Reg))) {
+    if (registerPressureSetExceedsLimit(1, MRI->getRegClass(Reg), *SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << "register pressure exceeds limit, not profitable.");
       return false;
     }
Index: llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -468,6 +468,7 @@
       : TargetPassConfig(TM, PM) {
     if (TM.getOptLevel() != CodeGenOpt::None)
       substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
+    setEnableCopySink(true);
   }
 
   AArch64TargetMachine &getAArch64TargetMachine() const {
Index: llvm/test/CodeGen/AArch64/copy-sink.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/copy-sink.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+target triple = "aarch64-linux"
+
+declare void @use(...)
+
+; Check that the address is sunk towards the load, since CodeGenPrepare
+; considers it likely to be "folded" into the call as well.
+define i32 @f0(i1 %c1, ptr %p) {
+; CHECK-LABEL: f0:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: tbz w0, #0, .LBB0_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: add x0, x1, #8
+; CHECK-NEXT: bl use
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_2: // %if.else
+; CHECK-NEXT: ldr w0, [x1, #8]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+  %a = getelementptr i32, ptr %p, i32 2
+  br i1 %c1, label %if.then, label %if.else
+
+if.then:
+  %v0 = call i32 @use(ptr %a)
+  br label %exit
+
+if.else:
+  %v1 = load i32, ptr %a
+  br label %exit
+
+exit:
+  %v = phi i32 [%v0, %if.then], [%v1, %if.else]
+  ret i32 %v
+}
+
+define i32 @f1(i1 %c1, ptr %p, i64 %i) {
+; CHECK-LABEL: f1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: tbz w0, #0, .LBB1_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: add x0, x1, x2
+; CHECK-NEXT: bl use
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB1_2: // %if.else
+; CHECK-NEXT: ldr w0, [x1, x2]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+  %a = getelementptr i8, ptr %p, i64 %i
+  br i1 %c1, label %if.then, label %if.else
+
+if.then:
+  %v0 = call i32 @use(ptr %a)
+  br label %exit
+
+if.else:
+  %v1 = load i32, ptr %a
+  br label %exit
+
+exit:
+  %v = phi i32 [%v0, %if.then], [%v1, %if.else]
+  ret i32 %v
+}
+
+; Address calculation too complex.
+define i32 @f2(i1 %c1, ptr %p, i64 %i) {
+; CHECK-LABEL: f2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: add x0, x1, x2, lsl #2
+; CHECK-NEXT: tbz w8, #0, .LBB2_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: bl use
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB2_2: // %if.else
+; CHECK-NEXT: ldr w0, [x0]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+  %a = getelementptr i32, ptr %p, i64 %i
+  br i1 %c1, label %if.then, label %if.else
+
+if.then:
+  %v0 = call i32 @use(ptr %a)
+  br label %exit
+
+if.else:
+  %v1 = load i32, ptr %a
+  br label %exit
+
+exit:
+  %v = phi i32 [%v0, %if.then], [%v1, %if.else]
+  ret i32 %v
+}
+
+; Address calculation cheap enough on some cores.
+define i32 @f3(i1 %c1, ptr %p, i64 %i) "target-cpu"="neoverse-n1" {
+; CHECK-LABEL: f3:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: tbz w0, #0, .LBB3_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: add x0, x1, x2, lsl #2
+; CHECK-NEXT: bl use
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB3_2: // %if.else
+; CHECK-NEXT: ldr w0, [x1, x2, lsl #2]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+  %a = getelementptr i32, ptr %p, i64 %i
+  br i1 %c1, label %if.then, label %if.else
+
+if.then:
+  %v0 = call i32 @use(ptr %a)
+  br label %exit
+
+if.else:
+  %v1 = load i32, ptr %a
+  br label %exit
+
+exit:
+  %v = phi i32 [%v0, %if.then], [%v1, %if.else]
+  ret i32 %v
+}
+
+define i32 @f4(i1 %c1, ptr %p, i64 %i) {
+; CHECK-LABEL: f4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x8, x1
+; CHECK-NEXT: tbz w0, #0, .LBB4_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: add x0, x8, x2
+; CHECK-NEXT: bl use
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB4_2: // %if.else
+; CHECK-NEXT: add x1, x8, x2
+; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: bl use
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+  %a = getelementptr i8, ptr %p, i64 %i
+  br i1 %c1, label %if.then, label %if.else
+
+if.then:
+  %v0 = call i32 @use(ptr %a)
+  br label %exit
+
+if.else:
+  %v1 = call i32 @use(i32 1, ptr %a)
+  br label %exit
+
+exit:
+  %v = phi i32 [%v0, %if.then], [%v1, %if.else]
+  ret i32 %v
+}
Index: llvm/test/CodeGen/AArch64/loop-sink.mir
===================================================================
--- llvm/test/CodeGen/AArch64/loop-sink.mir
+++ llvm/test/CodeGen/AArch64/loop-sink.mir
@@ -329,27 +329,22 @@
   ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY $x1
   ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x0
   ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = nuw ADDXri [[COPY]], 4, 0
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64all = COPY [[ADDXri]]
   ; CHECK-NEXT: [[ADDXri1:%[0-9]+]]:gpr64sp = nuw ADDXri [[COPY]], 8, 0
-  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64all = COPY [[ADDXri1]]
   ; CHECK-NEXT: [[ADDXri2:%[0-9]+]]:gpr64sp = nuw ADDXri [[COPY]], 12, 0
-  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64all = COPY [[ADDXri2]]
   ; CHECK-NEXT: [[ADDXri3:%[0-9]+]]:gpr64sp = nuw ADDXri [[COPY]], 16, 0
-  ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr64all = COPY [[ADDXri3]]
   ; CHECK-NEXT: [[ADDXri4:%[0-9]+]]:gpr64sp = nuw ADDXri [[COPY]], 20, 0
-  ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gpr64all = COPY [[ADDXri4]]
   ; CHECK-NEXT: [[ADDXri5:%[0-9]+]]:gpr64sp = ADDXri [[COPY1]], 1, 0
-  ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gpr64all = COPY [[ADDXri5]]
+  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64all = COPY [[ADDXri5]]
   ; CHECK-NEXT: [[MOVaddrJT:%[0-9]+]]:gpr64common = MOVaddrJT target-flags(aarch64-page) %jump-table.0, target-flags(aarch64-pageoff, aarch64-nc) %jump-table.0
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: bb.1..backedge:
   ; CHECK-NEXT: successors: %bb.9(0x09249249), %bb.2(0x76db6db7)
   ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr64sp = PHI [[COPY7]], %bb.0, %7, %bb.9
+  ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr64sp = PHI [[COPY2]], %bb.0, %7, %bb.9
   ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[PHI]], 0 :: (load (s8) from %ir.lsr.iv)
   ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, killed [[LDRBBui]], %subreg.sub_32
-  ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gpr32sp = COPY [[SUBREG_TO_REG]].sub_32
-  ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri killed [[COPY8]], 50, 0, implicit-def $nzcv
+  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32sp = COPY [[SUBREG_TO_REG]].sub_32
+  ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri killed [[COPY3]], 50, 0, implicit-def $nzcv
   ; CHECK-NEXT: Bcc 8, %bb.9, implicit $nzcv
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: bb.2..backedge:
@@ -371,7 +366,7 @@
   ; CHECK-NEXT: successors: %bb.9(0x80000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
-  ; CHECK-NEXT: $x0 = COPY [[COPY2]]
+  ; CHECK-NEXT: $x0 = nuw ADDXri [[COPY]], 4, 0
   ; CHECK-NEXT: BL @_Z6assignPj, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
   ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
   ; CHECK-NEXT: B %bb.9
@@ -380,7 +375,7 @@
   ; CHECK-NEXT: successors: %bb.9(0x80000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
-  ; CHECK-NEXT: $x0 = COPY [[COPY3]]
+  ; CHECK-NEXT: $x0 = nuw ADDXri [[COPY]], 8, 0
   ; CHECK-NEXT: BL @_Z6assignPj, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
   ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
   ; CHECK-NEXT: B %bb.9
@@ -389,7 +384,7 @@
   ; CHECK-NEXT: successors: %bb.9(0x80000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
-  ; CHECK-NEXT: $x0 = COPY [[COPY4]]
+  ; CHECK-NEXT: $x0 = nuw ADDXri [[COPY]], 12, 0
   ; CHECK-NEXT: BL @_Z6assignPj, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
   ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
   ; CHECK-NEXT: B %bb.9
@@ -398,7 +393,7 @@
   ; CHECK-NEXT: successors: %bb.9(0x80000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
-  ; CHECK-NEXT: $x0 = COPY [[COPY5]]
+  ; CHECK-NEXT: $x0 = nuw ADDXri [[COPY]], 16, 0
   ; CHECK-NEXT: BL @_Z6assignPj, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
   ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
   ; CHECK-NEXT: B %bb.9
@@ -407,7 +402,7 @@
   ; CHECK-NEXT: successors: %bb.9(0x80000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
-  ; CHECK-NEXT: $x0 = COPY [[COPY6]]
+  ; CHECK-NEXT: $x0 = nuw ADDXri [[COPY]], 20, 0
   ; CHECK-NEXT: BL @_Z6assignPj, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
   ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
   ; CHECK-NEXT: {{ $}}
@@ -415,7 +410,7 @@
   ; CHECK-NEXT: successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: [[ADDXri6:%[0-9]+]]:gpr64sp = ADDXri [[PHI]], 1, 0
-  ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gpr64all = COPY [[ADDXri6]]
+  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64all = COPY [[ADDXri6]]
   ; CHECK-NEXT: B %bb.1
   bb.0 (%ir-block.bb):
     successors: %bb.1(0x80000000)
@@ -1383,9 +1378,8 @@
   ; CHECK-NEXT: liveins: $x0, $x1, $x2, $w3
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY $w3
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x2
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64common = COPY $x1
-  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64common = COPY $x0
+  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64common = COPY $x0
   ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
   ; CHECK-NEXT: Bcc 11, %bb.2, implicit $nzcv
+  ; CHECK-NEXT: B %bb.1
@@ -1393,30 +1387,30 @@
   ; CHECK-NEXT: bb.1.for.body.preheader:
   ; CHECK-NEXT: successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[LDRWui:%[0-9]+]]:gpr32common = LDRWui [[COPY3]], 0 :: (load (s32) from %ir.read, !tbaa !0)
+  ; CHECK-NEXT: [[LDRWui:%[0-9]+]]:gpr32common = LDRWui [[COPY2]], 0 :: (load (s32) from %ir.read, !tbaa !0)
   ; CHECK-NEXT: [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[LDRWui]], 42, 0
-  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr32all = COPY [[ADDWri]]
+  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[ADDWri]]
   ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 43
-  ; CHECK-NEXT: STRWui killed [[MOVi32imm]], [[COPY3]], 0 :: (store (s32) into %ir.read, !tbaa !0)
+  ; CHECK-NEXT: STRWui killed [[MOVi32imm]], [[COPY2]], 0 :: (store (s32) into %ir.read, !tbaa !0)
   ; CHECK-NEXT: B %bb.3
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: bb.2.for.cond.cleanup:
   ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr32 = PHI [[COPY]], %bb.0, %6, %bb.3
-  ; CHECK-NEXT: STRWui [[PHI]], [[COPY2]], 0 :: (store (s32) into %ir.write, !tbaa !0)
+  ; CHECK-NEXT: STRWui [[PHI]], [[COPY1]], 0 :: (store (s32) into %ir.write, !tbaa !0)
   ; CHECK-NEXT: RET_ReallyLR
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: bb.3.for.body:
   ; CHECK-NEXT: successors: %bb.2(0x04000000), %bb.3(0x7c000000)
   ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr32common = PHI [[COPY4]], %bb.1, %8, %bb.3
+  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr32common = PHI [[COPY3]], %bb.1, %8, %bb.3
   ; CHECK-NEXT: [[PHI2:%[0-9]+]]:gpr32sp = PHI [[COPY]], %bb.1, %7, %bb.3
   ; CHECK-NEXT: [[PHI3:%[0-9]+]]:gpr32 = PHI [[COPY]], %bb.1, %6, %bb.3
   ; CHECK-NEXT: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[PHI3]], [[PHI1]]
-  ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[SDIVWr]]
+  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr32all = COPY [[SDIVWr]]
   ; CHECK-NEXT: [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[PHI2]], 1, 0, implicit-def $nzcv
-  ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gpr32all = COPY [[SUBSWri1]]
+  ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[SUBSWri1]]
   ; CHECK-NEXT: [[ADDWri1:%[0-9]+]]:gpr32sp = ADDWri [[PHI1]], 1, 0
-  ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gpr32all = COPY [[ADDWri1]]
+  ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gpr32all = COPY [[ADDWri1]]
   ; CHECK-NEXT: Bcc 0, %bb.2, implicit $nzcv
   ; CHECK-NEXT: B %bb.3
   bb.0.entry:
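
Note: opting another backend into the new behaviour only requires calling the
setEnableCopySink() hook added above from that target's pass config
constructor, as the AArch64 hunk does. A minimal sketch follows; only
setEnableCopySink() comes from this patch, while "MyTargetPassConfig" and its
constructor are illustrative boilerplate following the usual TargetPassConfig
pattern:

    // Hypothetical target pass config opting in to copy sinking; mirrors the
    // AArch64PassConfig change in this patch.
    #include "llvm/CodeGen/TargetPassConfig.h"
    #include "llvm/Target/TargetMachine.h"

    namespace {
    class MyTargetPassConfig : public llvm::TargetPassConfig {
    public:
      MyTargetPassConfig(llvm::LLVMTargetMachine &TM, llvm::PassManagerBase &PM)
          : TargetPassConfig(TM, PM) {
        // Allow MachineSink to rematerialize cheap instructions in place of
        // copies to physical registers (setEnableCopySink is added by this
        // patch; everything else here is standard boilerplate).
        setEnableCopySink(true);
      }
    };
    } // end anonymous namespace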