Index: lib/CodeGen/RegAllocFast.cpp
===================================================================
--- lib/CodeGen/RegAllocFast.cpp
+++ lib/CodeGen/RegAllocFast.cpp
@@ -105,13 +105,8 @@
     /// that it is alive across blocks.
     BitVector MayLiveAcrossBlocks;
 
-    /// State of a physical register.
-    enum RegState {
-      /// A disabled register is not available for allocation, but an alias may
-      /// be in use. A register can only be moved out of the disabled state if
-      /// all aliases are disabled.
-      regDisabled,
-
+    /// State of a register unit.
+    enum RegUnitState {
       /// A free register is not currently in use and can be allocated
       /// immediately without checking aliases.
       regFree,
@@ -125,8 +120,8 @@
       /// register. In that case, LiveVirtRegs contains the inverse mapping.
     };
 
-    /// Maps each physical register to a RegState enum or a virtual register.
-    std::vector<unsigned> PhysRegState;
+    /// Maps each physical register to a RegUnitState enum or virtual register.
+    std::vector<unsigned> RegUnitStates;
 
    SmallVector<unsigned, 8> VirtDead;
    SmallVector<MachineInstr *, 32> Coalesced;
@@ -137,6 +132,7 @@
    RegUnitSet UsedInInstr;
 
    void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
+   bool isPhysRegFree(MCPhysReg PhysReg) const;
 
    /// Mark a physreg as used in this instruction.
    void markRegUsedInInstr(MCPhysReg PhysReg) {
@@ -195,7 +191,7 @@
    void usePhysReg(MachineOperand &MO);
    void definePhysReg(MachineBasicBlock::iterator MI, MCPhysReg PhysReg,
-                      RegState NewState);
+                      unsigned NewState);
    unsigned calcSpillCost(MCPhysReg PhysReg) const;
    void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg);
@@ -228,7 +224,8 @@
    bool mayLiveOut(unsigned VirtReg);
    bool mayLiveIn(unsigned VirtReg);
 
-   void dumpState();
+   void printRegUnitState(unsigned State) const;
+   void dumpState() const;
  };
 
 } // end anonymous namespace
@@ -239,7 +236,16 @@
                 false)
 
 void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
-  PhysRegState[PhysReg] = NewState;
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI)
+    RegUnitStates[*UI] = NewState;
+}
+
+bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const {
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+    if (RegUnitStates[*UI] != regFree)
+      return false;
+  }
+  return true;
 }
 
 /// This allocates space for the specified virtual register to be held on the
@@ -386,9 +392,14 @@
 /// Mark virtreg as no longer available.
 void RegAllocFast::killVirtReg(LiveReg &LR) {
   addKillFlag(LR);
-  assert(PhysRegState[LR.PhysReg] == LR.VirtReg &&
-         "Broken RegState mapping");
-  setPhysRegState(LR.PhysReg, regFree);
+  MCPhysReg PhysReg = LR.PhysReg;
+#ifndef NDEBUG
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+    assert(RegUnitStates[*UI] == LR.VirtReg &&
+           "Broken RegState mapping");
+  }
+#endif
+  setPhysRegState(PhysReg, regFree);
   LR.PhysReg = 0;
 }
 
@@ -415,7 +426,12 @@
 /// Do the actual work of spilling.
 void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR) {
-  assert(PhysRegState[LR.PhysReg] == LR.VirtReg && "Broken RegState mapping");
+  MCPhysReg PhysReg = LR.PhysReg;
+#ifndef NDEBUG
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI)
+    assert(RegUnitStates[*UI] == LR.VirtReg &&
+           "Broken RegState mapping");
+#endif
 
   if (LR.Dirty) {
     // If this physreg is used by the instruction, we want to kill it on the
@@ -423,7 +439,7 @@
     bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI;
     LR.Dirty = false;
 
-    spill(MI, LR.VirtReg, LR.PhysReg, SpillKill);
+    spill(MI, LR.VirtReg, PhysReg, SpillKill);
 
     if (SpillKill)
       LR.LastUse = nullptr; // Don't kill register again
@@ -460,53 +476,16 @@
          "Bad usePhysReg operand");
 
   markRegUsedInInstr(PhysReg);
-  switch (PhysRegState[PhysReg]) {
-  case regDisabled:
-    break;
-  case regReserved:
-    PhysRegState[PhysReg] = regFree;
-    LLVM_FALLTHROUGH;
-  case regFree:
-    MO.setIsKill();
-    return;
-  default:
-    // The physreg was allocated to a virtual register. That means the value we
-    // wanted has been clobbered.
-    llvm_unreachable("Instruction uses an allocated register");
-  }
 
-  // Maybe a superregister is reserved?
-  for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
-    MCPhysReg Alias = *AI;
-    switch (PhysRegState[Alias]) {
-    case regDisabled:
-      break;
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+    switch (RegUnitStates[*UI]) {
     case regReserved:
-      // Either PhysReg is a subregister of Alias and we mark the
-      // whole register as free, or PhysReg is the superregister of
-      // Alias and we mark all the aliases as disabled before freeing
-      // PhysReg.
-      // In the latter case, since PhysReg was disabled, this means that
-      // its value is defined only by physical sub-registers. This check
-      // is performed by the assert of the default case in this loop.
-      // Note: The value of the superregister may only be partial
-      // defined, that is why regDisabled is a valid state for aliases.
-      assert((TRI->isSuperRegister(PhysReg, Alias) ||
-              TRI->isSuperRegister(Alias, PhysReg)) &&
-             "Instruction is not using a subregister of a reserved register");
+      RegUnitStates[*UI] = regFree;
       LLVM_FALLTHROUGH;
    case regFree:
-      if (TRI->isSuperRegister(PhysReg, Alias)) {
-        // Leave the superregister in the working set.
-        setPhysRegState(Alias, regFree);
-        MO.getParent()->addRegisterKilled(Alias, TRI, true);
-        return;
-      }
-      // Some other alias was in the working set - clear it.
-      setPhysRegState(Alias, regDisabled);
      break;
    default:
-      llvm_unreachable("Instruction uses an alias of an allocated register");
+      llvm_unreachable("Unexpected reg unit state");
    }
  }
 
@@ -519,38 +498,20 @@
 /// similar to defineVirtReg except the physreg is reserved instead of
 /// allocated.
 void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
-                                 MCPhysReg PhysReg, RegState NewState) {
-  markRegUsedInInstr(PhysReg);
-  switch (unsigned VirtReg = PhysRegState[PhysReg]) {
-  case regDisabled:
-    break;
-  default:
-    spillVirtReg(MI, VirtReg);
-    LLVM_FALLTHROUGH;
-  case regFree:
-  case regReserved:
-    setPhysRegState(PhysReg, NewState);
-    return;
-  }
-
-  // This is a disabled register, disable all aliases.
-  setPhysRegState(PhysReg, NewState);
-  for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
-    MCPhysReg Alias = *AI;
-    switch (unsigned VirtReg = PhysRegState[Alias]) {
-    case regDisabled:
-      break;
+                                 MCPhysReg PhysReg, unsigned NewState) {
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+    switch (unsigned VirtReg = RegUnitStates[*UI]) {
    default:
      spillVirtReg(MI, VirtReg);
-      LLVM_FALLTHROUGH;
+      break;
    case regFree:
    case regReserved:
-      setPhysRegState(Alias, regDisabled);
-      if (TRI->isSuperRegister(PhysReg, Alias))
-        return;
      break;
    }
  }
+
+  markRegUsedInInstr(PhysReg);
+  setPhysRegState(PhysReg, NewState);
 }
 
 /// Return the cost of spilling clearing out PhysReg and aliases so it is free
@@ -563,46 +524,24 @@
                       << " is already used in instr.\n");
     return spillImpossible;
   }
-  switch (unsigned VirtReg = PhysRegState[PhysReg]) {
-  case regDisabled:
-    break;
-  case regFree:
-    return 0;
-  case regReserved:
-    LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
-                      << printReg(PhysReg, TRI) << " is reserved already.\n");
-    return spillImpossible;
-  default: {
-    LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
-    assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
-           "Missing VirtReg entry");
-    return LRI->Dirty ? spillDirty : spillClean;
-  }
-  }
 
-  // This is a disabled register, add up cost of aliases.
-  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is disabled.\n");
-  unsigned Cost = 0;
-  for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
-    MCPhysReg Alias = *AI;
-    switch (unsigned VirtReg = PhysRegState[Alias]) {
-    case regDisabled:
-      break;
+  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+    switch (unsigned VirtReg = RegUnitStates[*UI]) {
    case regFree:
-      ++Cost;
      break;
    case regReserved:
+      LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
+                        << printReg(PhysReg, TRI) << " is reserved already.\n");
      return spillImpossible;
    default: {
      LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
      assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
             "Missing VirtReg entry");
-      Cost += LRI->Dirty ? spillDirty : spillClean;
-      break;
+      return LRI->Dirty ? spillDirty : spillClean;
    }
    }
  }
-  return Cost;
+  return 0;
 }
 
 /// This method updates local state so that we know that PhysReg is the
@@ -911,9 +850,10 @@
     unsigned Reg = MO.getReg();
     if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
     markRegUsedInInstr(Reg);
-    for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
-      if (ThroughRegs.count(PhysRegState[*AI]))
-        definePhysReg(MI, *AI, regFree);
+    for (MCRegUnitIterator UI(Reg, TRI); UI.isValid(); ++UI) {
+      if (ThroughRegs.count(RegUnitStates[*UI])) {
+        RegUnitStates[*UI] = regFree;
+      }
     }
   }
 
@@ -974,39 +914,41 @@
 }
 
 #ifndef NDEBUG
-void RegAllocFast::dumpState() {
-  for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
-    if (PhysRegState[Reg] == regDisabled) continue;
-    dbgs() << " " << printReg(Reg, TRI);
-    switch(PhysRegState[Reg]) {
+
+void RegAllocFast::dumpState() const {
+  for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
+       ++Unit) {
+    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regReserved:
-      dbgs() << "*";
+      dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
      break;
    default: {
-      dbgs() << '=' << printReg(PhysRegState[Reg]);
-      LiveRegMap::iterator LRI = findLiveVirtReg(PhysRegState[Reg]);
-      assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
-             "Missing VirtReg entry");
-      if (LRI->Dirty)
-        dbgs() << "*";
-      assert(LRI->PhysReg == Reg && "Bad inverse map");
+      dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
+      LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
+      assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
+      if (I->Dirty)
+        dbgs() << "[D]";
+      assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
      break;
    }
    }
  }
  dbgs() << '\n';
  // Check that LiveVirtRegs is the inverse.
-  for (LiveRegMap::iterator i = LiveVirtRegs.begin(),
-       e = LiveVirtRegs.end(); i != e; ++i) {
-    if (!i->PhysReg)
-      continue;
-    assert(TargetRegisterInfo::isVirtualRegister(i->VirtReg) &&
+  for (const LiveReg &LR : LiveVirtRegs) {
+    unsigned VirtReg = LR.VirtReg;
+    assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
            "Bad map key");
-    assert(TargetRegisterInfo::isPhysicalRegister(i->PhysReg) &&
-           "Bad map value");
-    assert(PhysRegState[i->PhysReg] == i->VirtReg && "Bad inverse map");
+    MCPhysReg PhysReg = LR.PhysReg;
+    if (PhysReg != 0) {
+      assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
+             "mapped to physreg");
+      for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+        assert(RegUnitStates[*UI] == VirtReg && "inverse map valid");
+      }
+    }
  }
 }
 #endif
@@ -1247,7 +1189,7 @@
   this->MBB = &MBB;
   LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);
 
-  PhysRegState.assign(TRI->getNumRegs(), regDisabled);
+  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
   assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
 
   MachineBasicBlock::iterator MII = MBB.begin();
Index: test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
+++ test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
@@ -4,8 +4,8 @@
 define i32 @fptosi_wh(half %a) nounwind ssp {
 entry:
 ; CHECK-LABEL: fptosi_wh
-; CHECK: fcvt s1, h0
-; CHECK: fcvtzs [[REG:w[0-9]+]], s1
+; CHECK: fcvt s0, h0
+; CHECK: fcvtzs [[REG:w[0-9]+]], s0
 ; CHECK: mov w0, [[REG]]
   %conv = fptosi half %a to i32
   ret i32 %conv
@@ -15,8 +15,8 @@
 define i32 @fptoui_swh(half %a) nounwind ssp {
 entry:
 ; CHECK-LABEL: fptoui_swh
-; CHECK: fcvt s1, h0
-; CHECK: fcvtzu [[REG:w[0-9]+]], s1
+; CHECK: fcvt s0, h0
+; CHECK: fcvtzu [[REG:w[0-9]+]], s0
 ; CHECK: mov w0, [[REG]]
   %conv = fptoui half %a to i32
   ret i32 %conv
Index: test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
+++ test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
@@ -54,8 +54,8 @@
 ; CHECK: ldrh w8, [sp, #12]
 ; CHECK: str w8, [sp, #8]
 ; CHECK: ldr w8, [sp, #8]
-; CHECK: mov x9, x8
-; CHECK: str x9, [sp]
+; CHECK: ; kill: def $x8 killed $w8
+; CHECK: str x8, [sp]
 ; CHECK: ldr x0, [sp]
 ; CHECK: ret
   %a.addr = alloca i8, align 1
@@ -109,8 +109,8 @@
 ; CHECK: strh w8, [sp, #12]
 ; CHECK: ldrsh w8, [sp, #12]
 ; CHECK: str w8, [sp, #8]
-; CHECK: ldrsw x9, [sp, #8]
-; CHECK: str x9, [sp]
+; CHECK: ldrsw x8, [sp, #8]
+; CHECK: str x8, [sp]
 ; CHECK: ldr x0, [sp]
 ; CHECK: ret
   %a.addr = alloca i8, align 1
Index: test/CodeGen/AArch64/arm64-vcvt_f.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvt_f.ll
+++ test/CodeGen/AArch64/arm64-vcvt_f.ll
@@ -140,11 +140,11 @@
 ; FAST:       // %bb.0:
 ; FAST-NEXT:    sub sp, sp, #16 // =16
 ; FAST-NEXT:    .cfi_def_cfa_offset 16
-; FAST-NEXT:    fcvt h1, s0
+; FAST-NEXT:    fcvt h0, s0
 ; FAST-NEXT:    // implicit-def: $w0
-; FAST-NEXT:    fmov s0, w0
-; FAST-NEXT:    mov.16b v0, v1
-; FAST-NEXT:    fmov w8, s0
+; FAST-NEXT:    fmov s1, w0
+; FAST-NEXT:    mov.16b v1, v0
+; FAST-NEXT:    fmov w8, s1
 ; FAST-NEXT:    mov w0, w8
 ; FAST-NEXT:    str w0, [sp, #12] // 4-byte Folded Spill
 ; FAST-NEXT:    mov w0, w8
Index: test/CodeGen/AArch64/fast-isel-sp-adjust.ll
===================================================================
--- test/CodeGen/AArch64/fast-isel-sp-adjust.ll
+++
test/CodeGen/AArch64/fast-isel-sp-adjust.ll @@ -14,8 +14,7 @@ ; CHECK-LABEL: foo: ; CHECK-DAG: mov x[[SP:[0-9]+]], sp -; CHECK-DAG: mov [[TMP:w[0-9]+]], #4104 -; CHECK: mov w[[OFFSET:[0-9]+]], [[TMP]] +; CHECK-DAG: mov w[[OFFSET:[0-9]+]], #4104 ; CHECK: strb w0, [x[[SP]], x[[OFFSET]]] define void @foo(i8 %in) { Index: test/CodeGen/AMDGPU/indirect-addressing-term.ll =================================================================== --- test/CodeGen/AMDGPU/indirect-addressing-term.ll +++ test/CodeGen/AMDGPU/indirect-addressing-term.ll @@ -69,15 +69,15 @@ ; GCN: renamable $vgpr30 = COPY killed renamable $vgpr14 ; GCN: renamable $vgpr31 = COPY killed renamable $vgpr15 ; GCN: renamable $vgpr32 = COPY killed renamable $vgpr16 - ; GCN: renamable $sgpr22_sgpr23 = S_MOV_B64 $exec + ; GCN: renamable $sgpr0_sgpr1 = S_MOV_B64 $exec ; GCN: renamable $vgpr1 = IMPLICIT_DEF - ; GCN: renamable $sgpr24_sgpr25 = IMPLICIT_DEF + ; GCN: renamable $sgpr4_sgpr5 = IMPLICIT_DEF ; GCN: SI_SPILL_V32_SAVE killed $vgpr0, %stack.0, $sgpr96_sgpr97_sgpr98_sgpr99, $sgpr3, 0, implicit $exec :: (store 4 into %stack.0, addrspace 5) ; GCN: SI_SPILL_S128_SAVE killed $sgpr8_sgpr9_sgpr10_sgpr11, %stack.1, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr3, implicit-def dead $m0 :: (store 16 into %stack.1, align 4, addrspace 5) ; GCN: SI_SPILL_V512_SAVE killed $vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32, %stack.2, $sgpr96_sgpr97_sgpr98_sgpr99, $sgpr3, 0, implicit $exec :: (store 64 into %stack.2, align 4, addrspace 5) - ; GCN: SI_SPILL_S64_SAVE killed $sgpr22_sgpr23, %stack.3, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr3, implicit-def dead $m0 :: (store 8 into %stack.3, align 4, addrspace 5) + ; GCN: SI_SPILL_S64_SAVE killed $sgpr0_sgpr1, %stack.3, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr3, implicit-def dead $m0 :: (store 8 into %stack.3, align 4, addrspace 5) ; GCN: SI_SPILL_V32_SAVE killed $vgpr1, %stack.4, $sgpr96_sgpr97_sgpr98_sgpr99, $sgpr3, 0, implicit $exec :: (store 4 into %stack.4, addrspace 5) - ; GCN: SI_SPILL_S64_SAVE killed $sgpr24_sgpr25, %stack.5, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr3, implicit-def dead $m0 :: (store 8 into %stack.5, align 4, addrspace 5) + ; GCN: SI_SPILL_S64_SAVE killed $sgpr4_sgpr5, %stack.5, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr3, implicit-def dead $m0 :: (store 8 into %stack.5, align 4, addrspace 5) ; GCN: bb.1: ; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; GCN: $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.5, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr3, implicit-def dead $m0 :: (load 8 from %stack.5, align 4, addrspace 5) Index: test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll =================================================================== --- test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll +++ test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll @@ -395,22 +395,22 @@ ; GCN: def s[4:19] ; GCN: def s[20:35] -; GCN: v_writelane_b32 v0, s4, 48 -; GCN-NEXT: v_writelane_b32 v0, s5, 49 -; GCN-NEXT: v_writelane_b32 v0, s6, 50 -; GCN-NEXT: v_writelane_b32 v0, s7, 51 -; GCN-NEXT: v_writelane_b32 v0, s8, 52 -; GCN-NEXT: v_writelane_b32 v0, s9, 53 -; GCN-NEXT: v_writelane_b32 v0, s10, 54 -; GCN-NEXT: v_writelane_b32 v0, s11, 55 -; GCN-NEXT: v_writelane_b32 v0, s12, 56 -; GCN-NEXT: v_writelane_b32 v0, s13, 57 -; GCN-NEXT: v_writelane_b32 v0, s14, 58 -; 
GCN-NEXT: v_writelane_b32 v0, s15, 59 -; GCN-NEXT: v_writelane_b32 v0, s16, 60 -; GCN-NEXT: v_writelane_b32 v0, s17, 61 -; GCN-NEXT: v_writelane_b32 v0, s18, 62 -; GCN-NEXT: v_writelane_b32 v0, s19, 63 +; GCN: v_writelane_b32 v0, s20, 48 +; GCN-NEXT: v_writelane_b32 v0, s21, 49 +; GCN-NEXT: v_writelane_b32 v0, s22, 50 +; GCN-NEXT: v_writelane_b32 v0, s23, 51 +; GCN-NEXT: v_writelane_b32 v0, s24, 52 +; GCN-NEXT: v_writelane_b32 v0, s25, 53 +; GCN-NEXT: v_writelane_b32 v0, s26, 54 +; GCN-NEXT: v_writelane_b32 v0, s27, 55 +; GCN-NEXT: v_writelane_b32 v0, s28, 56 +; GCN-NEXT: v_writelane_b32 v0, s29, 57 +; GCN-NEXT: v_writelane_b32 v0, s30, 58 +; GCN-NEXT: v_writelane_b32 v0, s31, 59 +; GCN-NEXT: v_writelane_b32 v0, s32, 60 +; GCN-NEXT: v_writelane_b32 v0, s33, 61 +; GCN-NEXT: v_writelane_b32 v0, s34, 62 +; GCN-NEXT: v_writelane_b32 v0, s35, 63 ; GCN: v_readlane_b32 s4, v0, 48 ; GCN-NEXT: v_readlane_b32 s5, v0, 49 @@ -491,7 +491,7 @@ ; GCN-NEXT: v_writelane_b32 v23, s{{[[0-9]+}}, 30 ; GCN-NEXT: v_writelane_b32 v23, s{{[[0-9]+}}, 31 -; GCN: def s[0:1] +; GCN: def s[36:37] ; GCN: v_writelane_b32 v23, s20, 32 ; GCN-NEXT: v_writelane_b32 v23, s21, 33 Index: test/CodeGen/AMDGPU/spill-m0.ll =================================================================== --- test/CodeGen/AMDGPU/spill-m0.ll +++ test/CodeGen/AMDGPU/spill-m0.ll @@ -115,7 +115,7 @@ ; GCN: ; def m0, 1 -; GCN: s_mov_b32 m0, s2 +; GCN: s_mov_b32 m0, s0 ; GCN: v_interp_mov_f32 ; GCN: ; clobber m0 @@ -171,21 +171,21 @@ ; TOSMEM: s_mov_b32 m0, -1 -; TOSMEM: s_mov_b32 s0, m0 +; TOSMEM: s_mov_b32 s2, m0 ; TOSMEM: s_add_u32 m0, s3, 0x200 ; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload -; TOSMEM: s_mov_b32 m0, s0 +; TOSMEM: s_mov_b32 m0, s2 ; TOSMEM: s_waitcnt lgkmcnt(0) ; TOSMEM: ds_write_b64 ; FIXME-TOSMEM-NOT: m0 ; TOSMEM: s_add_u32 m0, s3, 0x100 -; TOSMEM: s_buffer_load_dword s0, s[88:91], m0 ; 4-byte Folded Reload +; TOSMEM: s_buffer_load_dword s2, s[88:91], m0 ; 4-byte Folded Reload ; FIXME-TOSMEM-NOT: m0 ; TOSMEM: s_waitcnt lgkmcnt(0) ; TOSMEM-NOT: m0 -; TOSMEM: s_mov_b32 m0, s0 +; TOSMEM: s_mov_b32 m0, s2 ; TOSMEM: ; use m0 ; TOSMEM: s_dcache_wb Index: test/CodeGen/AMDGPU/wwm-reserved.ll =================================================================== --- test/CodeGen/AMDGPU/wwm-reserved.ll +++ test/CodeGen/AMDGPU/wwm-reserved.ll @@ -90,10 +90,10 @@ } define amdgpu_kernel void @call(<4 x i32> inreg %tmp14, i32 inreg %arg) { -; GFX9-O0: v_mov_b32_e32 v0, s2 +; GFX9-O0: v_mov_b32_e32 v0, s0 ; GFX9-O3: v_mov_b32_e32 v2, s0 ; GFX9-NEXT: s_not_b64 exec, exec -; GFX9-O0-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-O0-NEXT: v_mov_b32_e32 v0, s1 ; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_not_b64 exec, exec %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %arg, i32 0) @@ -136,8 +136,8 @@ ; GFX9-O0: buffer_store_dword v1 ; GFX9: s_swappc_b64 %tmp134 = call i64 @called_i64(i64 %tmp107) -; GFX9-O0: buffer_load_dword v6 -; GFX9-O0: buffer_load_dword v7 +; GFX9-O0: buffer_load_dword v4 +; GFX9-O0: buffer_load_dword v5 %tmp136 = add i64 %tmp134, %tmp107 %tmp137 = tail call i64 @llvm.amdgcn.wwm.i64(i64 %tmp136) %tmp138 = bitcast i64 %tmp137 to <2 x i32> Index: test/CodeGen/Mips/atomic.ll =================================================================== --- test/CodeGen/Mips/atomic.ll +++ test/CodeGen/Mips/atomic.ll @@ -2559,28 +2559,28 @@ ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) ; MIPS64R6O0-NEXT: daddiu $2, $zero, -4 ; MIPS64R6O0-NEXT: and $2, $1, $2 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; 
MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $4, $4, $3 +; MIPS64R6O0-NEXT: andi $1, $1, 3 +; MIPS64R6O0-NEXT: xori $1, $1, 3 +; MIPS64R6O0-NEXT: sll $1, $1, 3 +; MIPS64R6O0-NEXT: ori $3, $zero, 255 +; MIPS64R6O0-NEXT: sllv $3, $3, $1 +; MIPS64R6O0-NEXT: nor $5, $zero, $3 +; MIPS64R6O0-NEXT: sllv $4, $4, $1 ; MIPS64R6O0-NEXT: .LBB8_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($2) -; MIPS64R6O0-NEXT: addu $9, $8, $4 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($2) -; MIPS64R6O0-NEXT: beqzc $10, .LBB8_1 +; MIPS64R6O0-NEXT: ll $7, 0($2) +; MIPS64R6O0-NEXT: addu $8, $7, $4 +; MIPS64R6O0-NEXT: and $8, $8, $3 +; MIPS64R6O0-NEXT: and $9, $7, $5 +; MIPS64R6O0-NEXT: or $9, $9, $8 +; MIPS64R6O0-NEXT: sc $9, 0($2) +; MIPS64R6O0-NEXT: beqzc $9, .LBB8_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $6, $7, $3 +; MIPS64R6O0-NEXT: srlv $6, $6, $1 +; MIPS64R6O0-NEXT: seb $6, $6 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $6, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -3075,28 +3075,28 @@ ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) ; MIPS64R6O0-NEXT: daddiu $2, $zero, -4 ; MIPS64R6O0-NEXT: and $2, $1, $2 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $4, $4, $3 +; MIPS64R6O0-NEXT: andi $1, $1, 3 +; MIPS64R6O0-NEXT: xori $1, $1, 3 +; MIPS64R6O0-NEXT: sll $1, $1, 3 +; MIPS64R6O0-NEXT: ori $3, $zero, 255 +; MIPS64R6O0-NEXT: sllv $3, $3, $1 +; MIPS64R6O0-NEXT: nor $5, $zero, $3 +; MIPS64R6O0-NEXT: sllv $4, $4, $1 ; MIPS64R6O0-NEXT: .LBB9_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($2) -; MIPS64R6O0-NEXT: subu $9, $8, $4 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($2) -; MIPS64R6O0-NEXT: beqzc $10, .LBB9_1 +; MIPS64R6O0-NEXT: ll $7, 0($2) +; MIPS64R6O0-NEXT: subu $8, $7, $4 +; MIPS64R6O0-NEXT: and $8, $8, $3 +; MIPS64R6O0-NEXT: and $9, $7, $5 +; MIPS64R6O0-NEXT: or $9, $9, $8 +; MIPS64R6O0-NEXT: sc $9, 0($2) +; MIPS64R6O0-NEXT: beqzc $9, .LBB9_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $6, $7, $3 +; MIPS64R6O0-NEXT: srlv $6, $6, $1 +; MIPS64R6O0-NEXT: seb $6, $6 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $6, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -3601,29 +3601,29 @@ ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) ; MIPS64R6O0-NEXT: daddiu $2, $zero, -4 ; MIPS64R6O0-NEXT: and $2, $1, $2 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll 
$3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $4, $4, $3 +; MIPS64R6O0-NEXT: andi $1, $1, 3 +; MIPS64R6O0-NEXT: xori $1, $1, 3 +; MIPS64R6O0-NEXT: sll $1, $1, 3 +; MIPS64R6O0-NEXT: ori $3, $zero, 255 +; MIPS64R6O0-NEXT: sllv $3, $3, $1 +; MIPS64R6O0-NEXT: nor $5, $zero, $3 +; MIPS64R6O0-NEXT: sllv $4, $4, $1 ; MIPS64R6O0-NEXT: .LBB10_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($2) -; MIPS64R6O0-NEXT: and $9, $8, $4 -; MIPS64R6O0-NEXT: nor $9, $zero, $9 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($2) -; MIPS64R6O0-NEXT: beqzc $10, .LBB10_1 +; MIPS64R6O0-NEXT: ll $7, 0($2) +; MIPS64R6O0-NEXT: and $8, $7, $4 +; MIPS64R6O0-NEXT: nor $8, $zero, $8 +; MIPS64R6O0-NEXT: and $8, $8, $3 +; MIPS64R6O0-NEXT: and $9, $7, $5 +; MIPS64R6O0-NEXT: or $9, $9, $8 +; MIPS64R6O0-NEXT: sc $9, 0($2) +; MIPS64R6O0-NEXT: beqzc $9, .LBB10_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $6, $7, $3 +; MIPS64R6O0-NEXT: srlv $6, $6, $1 +; MIPS64R6O0-NEXT: seb $6, $6 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $6, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -4115,27 +4115,27 @@ ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) ; MIPS64R6O0-NEXT: daddiu $2, $zero, -4 ; MIPS64R6O0-NEXT: and $2, $1, $2 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $4, $4, $3 +; MIPS64R6O0-NEXT: andi $1, $1, 3 +; MIPS64R6O0-NEXT: xori $1, $1, 3 +; MIPS64R6O0-NEXT: sll $1, $1, 3 +; MIPS64R6O0-NEXT: ori $3, $zero, 255 +; MIPS64R6O0-NEXT: sllv $3, $3, $1 +; MIPS64R6O0-NEXT: nor $5, $zero, $3 +; MIPS64R6O0-NEXT: sllv $4, $4, $1 ; MIPS64R6O0-NEXT: .LBB11_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($2) -; MIPS64R6O0-NEXT: and $9, $4, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($2) -; MIPS64R6O0-NEXT: beqzc $10, .LBB11_1 +; MIPS64R6O0-NEXT: ll $7, 0($2) +; MIPS64R6O0-NEXT: and $8, $4, $3 +; MIPS64R6O0-NEXT: and $9, $7, $5 +; MIPS64R6O0-NEXT: or $9, $9, $8 +; MIPS64R6O0-NEXT: sc $9, 0($2) +; MIPS64R6O0-NEXT: beqzc $9, .LBB11_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $6, $7, $3 +; MIPS64R6O0-NEXT: srlv $6, $6, $1 +; MIPS64R6O0-NEXT: seb $6, $6 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $6, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -4666,32 +4666,32 @@ ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) ; MIPS64R6O0-NEXT: daddiu $2, $zero, -4 ; MIPS64R6O0-NEXT: and $2, $1, $2 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $6, $zero, 255 
-; MIPS64R6O0-NEXT: sllv $6, $6, $3 -; MIPS64R6O0-NEXT: nor $7, $zero, $6 +; MIPS64R6O0-NEXT: andi $1, $1, 3 +; MIPS64R6O0-NEXT: xori $1, $1, 3 +; MIPS64R6O0-NEXT: sll $1, $1, 3 +; MIPS64R6O0-NEXT: ori $3, $zero, 255 +; MIPS64R6O0-NEXT: sllv $3, $3, $1 +; MIPS64R6O0-NEXT: nor $6, $zero, $3 ; MIPS64R6O0-NEXT: andi $4, $4, 255 -; MIPS64R6O0-NEXT: sllv $4, $4, $3 +; MIPS64R6O0-NEXT: sllv $4, $4, $1 ; MIPS64R6O0-NEXT: andi $5, $5, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 +; MIPS64R6O0-NEXT: sllv $5, $5, $1 ; MIPS64R6O0-NEXT: .LBB12_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $9, 0($2) -; MIPS64R6O0-NEXT: and $10, $9, $6 -; MIPS64R6O0-NEXT: bnec $10, $4, .LBB12_3 +; MIPS64R6O0-NEXT: ll $8, 0($2) +; MIPS64R6O0-NEXT: and $9, $8, $3 +; MIPS64R6O0-NEXT: bnec $9, $4, .LBB12_3 ; MIPS64R6O0-NEXT: # %bb.2: # %entry ; MIPS64R6O0-NEXT: # in Loop: Header=BB12_1 Depth=1 -; MIPS64R6O0-NEXT: and $9, $9, $7 -; MIPS64R6O0-NEXT: or $9, $9, $5 -; MIPS64R6O0-NEXT: sc $9, 0($2) -; MIPS64R6O0-NEXT: beqzc $9, .LBB12_1 +; MIPS64R6O0-NEXT: and $8, $8, $6 +; MIPS64R6O0-NEXT: or $8, $8, $5 +; MIPS64R6O0-NEXT: sc $8, 0($2) +; MIPS64R6O0-NEXT: beqzc $8, .LBB12_1 ; MIPS64R6O0-NEXT: .LBB12_3: # %entry -; MIPS64R6O0-NEXT: srlv $8, $10, $3 -; MIPS64R6O0-NEXT: seb $8, $8 +; MIPS64R6O0-NEXT: srlv $7, $9, $1 +; MIPS64R6O0-NEXT: seb $7, $7 ; MIPS64R6O0-NEXT: # %bb.4: # %entry -; MIPS64R6O0-NEXT: sw $8, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.5: # %entry ; MIPS64R6O0-NEXT: lw $2, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 @@ -5236,28 +5236,28 @@ ; MIPS64R6O0-NEXT: sll $2, $2, 3 ; MIPS64R6O0-NEXT: ori $3, $zero, 255 ; MIPS64R6O0-NEXT: sllv $3, $3, $2 -; MIPS64R6O0-NEXT: nor $7, $zero, $3 -; MIPS64R6O0-NEXT: andi $8, $5, 255 -; MIPS64R6O0-NEXT: sllv $8, $8, $2 +; MIPS64R6O0-NEXT: nor $4, $zero, $3 +; MIPS64R6O0-NEXT: andi $7, $5, 255 +; MIPS64R6O0-NEXT: sllv $7, $7, $2 ; MIPS64R6O0-NEXT: andi $6, $6, 255 ; MIPS64R6O0-NEXT: sllv $6, $6, $2 ; MIPS64R6O0-NEXT: .LBB13_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $10, 0($1) -; MIPS64R6O0-NEXT: and $11, $10, $3 -; MIPS64R6O0-NEXT: bnec $11, $8, .LBB13_3 +; MIPS64R6O0-NEXT: ll $9, 0($1) +; MIPS64R6O0-NEXT: and $10, $9, $3 +; MIPS64R6O0-NEXT: bnec $10, $7, .LBB13_3 ; MIPS64R6O0-NEXT: # %bb.2: # %entry ; MIPS64R6O0-NEXT: # in Loop: Header=BB13_1 Depth=1 -; MIPS64R6O0-NEXT: and $10, $10, $7 -; MIPS64R6O0-NEXT: or $10, $10, $6 -; MIPS64R6O0-NEXT: sc $10, 0($1) -; MIPS64R6O0-NEXT: beqzc $10, .LBB13_1 +; MIPS64R6O0-NEXT: and $9, $9, $4 +; MIPS64R6O0-NEXT: or $9, $9, $6 +; MIPS64R6O0-NEXT: sc $9, 0($1) +; MIPS64R6O0-NEXT: beqzc $9, .LBB13_1 ; MIPS64R6O0-NEXT: .LBB13_3: # %entry -; MIPS64R6O0-NEXT: srlv $9, $11, $2 -; MIPS64R6O0-NEXT: seb $9, $9 +; MIPS64R6O0-NEXT: srlv $8, $10, $2 +; MIPS64R6O0-NEXT: seb $8, $8 ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: sw $5, 12($sp) # 4-byte Folded Spill -; MIPS64R6O0-NEXT: sw $9, 8($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 8($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.5: # %entry ; MIPS64R6O0-NEXT: lw $1, 8($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: lw $2, 12($sp) # 4-byte Folded Reload @@ -5775,28 +5775,28 @@ ; MIPS64R6O0-NEXT: ld $1, %got_disp(z)($1) ; MIPS64R6O0-NEXT: daddiu $2, $zero, -4 ; MIPS64R6O0-NEXT: and $2, $1, $2 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 2 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; 
MIPS64R6O0-NEXT: ori $5, $zero, 65535 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $4, $4, $3 +; MIPS64R6O0-NEXT: andi $1, $1, 3 +; MIPS64R6O0-NEXT: xori $1, $1, 2 +; MIPS64R6O0-NEXT: sll $1, $1, 3 +; MIPS64R6O0-NEXT: ori $3, $zero, 65535 +; MIPS64R6O0-NEXT: sllv $3, $3, $1 +; MIPS64R6O0-NEXT: nor $5, $zero, $3 +; MIPS64R6O0-NEXT: sllv $4, $4, $1 ; MIPS64R6O0-NEXT: .LBB14_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($2) -; MIPS64R6O0-NEXT: addu $9, $8, $4 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($2) -; MIPS64R6O0-NEXT: beqzc $10, .LBB14_1 +; MIPS64R6O0-NEXT: ll $7, 0($2) +; MIPS64R6O0-NEXT: addu $8, $7, $4 +; MIPS64R6O0-NEXT: and $8, $8, $3 +; MIPS64R6O0-NEXT: and $9, $7, $5 +; MIPS64R6O0-NEXT: or $9, $9, $8 +; MIPS64R6O0-NEXT: sc $9, 0($2) +; MIPS64R6O0-NEXT: beqzc $9, .LBB14_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seh $7, $7 +; MIPS64R6O0-NEXT: and $6, $7, $3 +; MIPS64R6O0-NEXT: srlv $6, $6, $1 +; MIPS64R6O0-NEXT: seh $6, $6 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $6, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seh $2, $1 @@ -6359,33 +6359,33 @@ ; MIPS64R6O0-NEXT: sll $3, $5, 0 ; MIPS64R6O0-NEXT: addu $2, $3, $2 ; MIPS64R6O0-NEXT: sync -; MIPS64R6O0-NEXT: daddiu $8, $zero, -4 -; MIPS64R6O0-NEXT: and $8, $4, $8 -; MIPS64R6O0-NEXT: andi $3, $4, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 2 -; MIPS64R6O0-NEXT: sll $3, $3, 3 +; MIPS64R6O0-NEXT: daddiu $3, $zero, -4 +; MIPS64R6O0-NEXT: and $3, $4, $3 +; MIPS64R6O0-NEXT: andi $4, $4, 3 +; MIPS64R6O0-NEXT: xori $4, $4, 2 +; MIPS64R6O0-NEXT: sll $4, $4, 3 ; MIPS64R6O0-NEXT: ori $5, $zero, 65535 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 +; MIPS64R6O0-NEXT: sllv $5, $5, $4 ; MIPS64R6O0-NEXT: nor $6, $zero, $5 ; MIPS64R6O0-NEXT: andi $7, $2, 65535 -; MIPS64R6O0-NEXT: sllv $7, $7, $3 +; MIPS64R6O0-NEXT: sllv $7, $7, $4 ; MIPS64R6O0-NEXT: andi $1, $1, 65535 -; MIPS64R6O0-NEXT: sllv $1, $1, $3 +; MIPS64R6O0-NEXT: sllv $1, $1, $4 ; MIPS64R6O0-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $10, 0($8) -; MIPS64R6O0-NEXT: and $11, $10, $5 -; MIPS64R6O0-NEXT: bnec $11, $7, .LBB15_3 +; MIPS64R6O0-NEXT: ll $9, 0($3) +; MIPS64R6O0-NEXT: and $10, $9, $5 +; MIPS64R6O0-NEXT: bnec $10, $7, .LBB15_3 ; MIPS64R6O0-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1 -; MIPS64R6O0-NEXT: and $10, $10, $6 -; MIPS64R6O0-NEXT: or $10, $10, $1 -; MIPS64R6O0-NEXT: sc $10, 0($8) -; MIPS64R6O0-NEXT: beqzc $10, .LBB15_1 +; MIPS64R6O0-NEXT: and $9, $9, $6 +; MIPS64R6O0-NEXT: or $9, $9, $1 +; MIPS64R6O0-NEXT: sc $9, 0($3) +; MIPS64R6O0-NEXT: beqzc $9, .LBB15_1 ; MIPS64R6O0-NEXT: .LBB15_3: -; MIPS64R6O0-NEXT: srlv $9, $11, $3 -; MIPS64R6O0-NEXT: seh $9, $9 +; MIPS64R6O0-NEXT: srlv $8, $10, $4 +; MIPS64R6O0-NEXT: seh $8, $8 ; MIPS64R6O0-NEXT: # %bb.4: ; MIPS64R6O0-NEXT: sw $2, 12($sp) # 4-byte Folded Spill -; MIPS64R6O0-NEXT: sw $9, 8($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 8($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.5: ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seh $2, $1 @@ -7145,8 +7145,8 @@ ; MIPS64R6O0-NEXT: sc $6, 0($1) ; MIPS64R6O0-NEXT: beqzc $6, 
.LBB17_1 ; MIPS64R6O0-NEXT: .LBB17_3: # %entry -; MIPS64R6O0-NEXT: xor $2, $5, $3 -; MIPS64R6O0-NEXT: sltiu $2, $2, 1 +; MIPS64R6O0-NEXT: xor $1, $5, $3 +; MIPS64R6O0-NEXT: sltiu $2, $1, 1 ; MIPS64R6O0-NEXT: sync ; MIPS64R6O0-NEXT: jrc $ra ; Index: test/CodeGen/PowerPC/addegluecrash.ll =================================================================== --- test/CodeGen/PowerPC/addegluecrash.ll +++ test/CodeGen/PowerPC/addegluecrash.ll @@ -21,11 +21,11 @@ ; CHECK-NEXT: addze 5, 5 ; CHECK-NEXT: add 4, 5, 4 ; CHECK-NEXT: cmpld 7, 4, 5 -; CHECK-NEXT: mfocrf 10, 1 -; CHECK-NEXT: rlwinm 10, 10, 29, 31, 31 -; CHECK-NEXT: # implicit-def: $x4 -; CHECK-NEXT: mr 4, 10 -; CHECK-NEXT: clrldi 4, 4, 32 +; CHECK-NEXT: mfocrf 4, 1 +; CHECK-NEXT: rlwinm 4, 4, 29, 31, 31 +; CHECK-NEXT: # implicit-def: $x5 +; CHECK-NEXT: mr 5, 4 +; CHECK-NEXT: clrldi 4, 5, 32 ; CHECK-NEXT: std 4, 0(3) ; CHECK-NEXT: blr %1 = load i64, i64* %a, align 8 Index: test/CodeGen/PowerPC/vsx.ll =================================================================== --- test/CodeGen/PowerPC/vsx.ll +++ test/CodeGen/PowerPC/vsx.ll @@ -1548,8 +1548,8 @@ ; CHECK-FISL-NEXT: ld r3, -24(r1) ; CHECK-FISL-NEXT: std r3, -16(r1) ; CHECK-FISL-NEXT: addi r3, r1, -16 -; CHECK-FISL-NEXT: lxvd2x vs1, 0, r3 -; CHECK-FISL-NEXT: xxlor v2, vs1, vs1 +; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3 +; CHECK-FISL-NEXT: xxlor v2, vs0, vs0 ; CHECK-FISL-NEXT: blr ; ; CHECK-LE-LABEL: test46: @@ -1619,8 +1619,8 @@ ; CHECK-FISL-NEXT: ld r3, -24(r1) ; CHECK-FISL-NEXT: std r3, -16(r1) ; CHECK-FISL-NEXT: addi r3, r1, -16 -; CHECK-FISL-NEXT: lxvd2x vs1, 0, r3 -; CHECK-FISL-NEXT: xxlor v2, vs1, vs1 +; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3 +; CHECK-FISL-NEXT: xxlor v2, vs0, vs0 ; CHECK-FISL-NEXT: blr ; ; CHECK-LE-LABEL: test47: @@ -1865,13 +1865,13 @@ ; CHECK-FISL-NEXT: stxvd2x v3, 0, r3 ; CHECK-FISL-NEXT: addi r3, r1, -48 ; CHECK-FISL-NEXT: stxvd2x v2, 0, r3 -; CHECK-FISL-NEXT: lwz r4, -20(r1) -; CHECK-FISL-NEXT: ld r3, -40(r1) -; CHECK-FISL-NEXT: sld r3, r3, r4 +; CHECK-FISL-NEXT: lwz r3, -20(r1) +; CHECK-FISL-NEXT: ld r4, -40(r1) +; CHECK-FISL-NEXT: sld r3, r4, r3 ; CHECK-FISL-NEXT: std r3, -8(r1) -; CHECK-FISL-NEXT: lwz r4, -28(r1) -; CHECK-FISL-NEXT: ld r3, -48(r1) -; CHECK-FISL-NEXT: sld r3, r3, r4 +; CHECK-FISL-NEXT: lwz r3, -28(r1) +; CHECK-FISL-NEXT: ld r4, -48(r1) +; CHECK-FISL-NEXT: sld r3, r4, r3 ; CHECK-FISL-NEXT: std r3, -16(r1) ; CHECK-FISL-NEXT: addi r3, r1, -16 ; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3 @@ -1931,13 +1931,13 @@ ; CHECK-FISL-NEXT: stxvd2x v3, 0, r3 ; CHECK-FISL-NEXT: addi r3, r1, -48 ; CHECK-FISL-NEXT: stxvd2x v2, 0, r3 -; CHECK-FISL-NEXT: lwz r4, -20(r1) -; CHECK-FISL-NEXT: ld r3, -40(r1) -; CHECK-FISL-NEXT: srd r3, r3, r4 +; CHECK-FISL-NEXT: lwz r3, -20(r1) +; CHECK-FISL-NEXT: ld r4, -40(r1) +; CHECK-FISL-NEXT: srd r3, r4, r3 ; CHECK-FISL-NEXT: std r3, -8(r1) -; CHECK-FISL-NEXT: lwz r4, -28(r1) -; CHECK-FISL-NEXT: ld r3, -48(r1) -; CHECK-FISL-NEXT: srd r3, r3, r4 +; CHECK-FISL-NEXT: lwz r3, -28(r1) +; CHECK-FISL-NEXT: ld r4, -48(r1) +; CHECK-FISL-NEXT: srd r3, r4, r3 ; CHECK-FISL-NEXT: std r3, -16(r1) ; CHECK-FISL-NEXT: addi r3, r1, -16 ; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3 @@ -1997,13 +1997,13 @@ ; CHECK-FISL-NEXT: stxvd2x v3, 0, r3 ; CHECK-FISL-NEXT: addi r3, r1, -48 ; CHECK-FISL-NEXT: stxvd2x v2, 0, r3 -; CHECK-FISL-NEXT: lwz r4, -20(r1) -; CHECK-FISL-NEXT: ld r3, -40(r1) -; CHECK-FISL-NEXT: srad r3, r3, r4 +; CHECK-FISL-NEXT: lwz r3, -20(r1) +; CHECK-FISL-NEXT: ld r4, -40(r1) +; CHECK-FISL-NEXT: srad r3, r4, r3 ; CHECK-FISL-NEXT: std r3, -8(r1) -; 
CHECK-FISL-NEXT: lwz r4, -28(r1) -; CHECK-FISL-NEXT: ld r3, -48(r1) -; CHECK-FISL-NEXT: srad r3, r3, r4 +; CHECK-FISL-NEXT: lwz r3, -28(r1) +; CHECK-FISL-NEXT: ld r4, -48(r1) +; CHECK-FISL-NEXT: srad r3, r4, r3 ; CHECK-FISL-NEXT: std r3, -16(r1) ; CHECK-FISL-NEXT: addi r3, r1, -16 ; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3 @@ -2432,12 +2432,12 @@ ; CHECK-FISL: # %bb.0: ; CHECK-FISL-NEXT: # kill: def $r3 killed $r3 killed $x3 ; CHECK-FISL-NEXT: stw r3, -16(r1) -; CHECK-FISL-NEXT: addi r4, r1, -16 -; CHECK-FISL-NEXT: lxvw4x vs0, 0, r4 +; CHECK-FISL-NEXT: addi r3, r1, -16 +; CHECK-FISL-NEXT: lxvw4x vs0, 0, r3 ; CHECK-FISL-NEXT: xxspltw v2, vs0, 0 -; CHECK-FISL-NEXT: addis r4, r2, .LCPI65_0@toc@ha -; CHECK-FISL-NEXT: addi r4, r4, .LCPI65_0@toc@l -; CHECK-FISL-NEXT: lxvw4x v3, 0, r4 +; CHECK-FISL-NEXT: addis r3, r2, .LCPI65_0@toc@ha +; CHECK-FISL-NEXT: addi r3, r3, .LCPI65_0@toc@l +; CHECK-FISL-NEXT: lxvw4x v3, 0, r3 ; CHECK-FISL-NEXT: vadduwm v2, v2, v3 ; CHECK-FISL-NEXT: blr ; Index: test/CodeGen/X86/atomic-unordered.ll =================================================================== --- test/CodeGen/X86/atomic-unordered.ll +++ test/CodeGen/X86/atomic-unordered.ll @@ -149,8 +149,8 @@ ; CHECK-O0-NEXT: movq (%rdi), %rax ; CHECK-O0-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-O0-NEXT: andl $-256, %eax -; CHECK-O0-NEXT: movl %eax, %ecx -; CHECK-O0-NEXT: movq %rcx, (%rdi) +; CHECK-O0-NEXT: # kill: def $rax killed $eax +; CHECK-O0-NEXT: movq %rax, (%rdi) ; CHECK-O0-NEXT: retq ; ; CHECK-O3-LABEL: narrow_writeback_and: @@ -279,10 +279,10 @@ ; CHECK-O0-NEXT: .cfi_def_cfa_offset 16 ; CHECK-O0-NEXT: .cfi_offset %rbx, -16 ; CHECK-O0-NEXT: xorl %eax, %eax -; CHECK-O0-NEXT: movl %eax, %ecx -; CHECK-O0-NEXT: movq %rcx, %rax -; CHECK-O0-NEXT: movq %rcx, %rdx -; CHECK-O0-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; CHECK-O0-NEXT: # kill: def $rax killed $eax +; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload +; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload ; CHECK-O0-NEXT: lock cmpxchg16b (%rdi) ; CHECK-O0-NEXT: popq %rbx @@ -374,14 +374,14 @@ ; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; CHECK-O0-NEXT: callq __atomic_load ; CHECK-O0-NEXT: movq {{[0-9]+}}(%rsp), %rax +; CHECK-O0-NEXT: movq {{[0-9]+}}(%rsp), %rcx ; CHECK-O0-NEXT: movq {{[0-9]+}}(%rsp), %rdx ; CHECK-O0-NEXT: movq {{[0-9]+}}(%rsp), %rsi -; CHECK-O0-NEXT: movq {{[0-9]+}}(%rsp), %rdi -; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; CHECK-O0-NEXT: movq %rdi, 24(%r9) -; CHECK-O0-NEXT: movq %rsi, 16(%r9) -; CHECK-O0-NEXT: movq %rdx, 8(%r9) -; CHECK-O0-NEXT: movq %rax, (%r9) +; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload +; CHECK-O0-NEXT: movq %rsi, 24(%rdi) +; CHECK-O0-NEXT: movq %rdx, 16(%rdi) +; CHECK-O0-NEXT: movq %rcx, 8(%rdi) +; CHECK-O0-NEXT: movq %rax, (%rdi) ; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; CHECK-O0-NEXT: addq $56, %rsp ; CHECK-O0-NEXT: .cfi_def_cfa_offset 8 @@ -849,8 +849,8 @@ ; CHECK-O0-NEXT: movq (%rdi), %rax ; CHECK-O0-NEXT: xorl %ecx, %ecx ; CHECK-O0-NEXT: movl %ecx, %edx -; CHECK-O0-NEXT: movl $15, %esi -; CHECK-O0-NEXT: divq %rsi +; CHECK-O0-NEXT: movl $15, %ecx +; CHECK-O0-NEXT: divq %rcx ; CHECK-O0-NEXT: retq ; ; CHECK-O3-LABEL: load_fold_udiv1: @@ -1036,8 +1036,8 @@ ; CHECK-O0-NEXT: movq (%rdi), %rax ; CHECK-O0-NEXT: xorl %ecx, 
%ecx ; CHECK-O0-NEXT: movl %ecx, %edx -; CHECK-O0-NEXT: movl $15, %esi -; CHECK-O0-NEXT: divq %rsi +; CHECK-O0-NEXT: movl $15, %ecx +; CHECK-O0-NEXT: divq %rcx ; CHECK-O0-NEXT: movq %rdx, %rax ; CHECK-O0-NEXT: retq ; @@ -1500,9 +1500,9 @@ ; CHECK-O0-NEXT: movq (%rdi), %rax ; CHECK-O0-NEXT: movq (%rsi), %rcx ; CHECK-O0-NEXT: subq %rcx, %rax -; CHECK-O0-NEXT: sete %dl +; CHECK-O0-NEXT: sete %cl ; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-O0-NEXT: movb %dl, %al +; CHECK-O0-NEXT: movb %cl, %al ; CHECK-O0-NEXT: retq ; ; CHECK-O3-LABEL: load_fold_icmp3: @@ -2060,8 +2060,8 @@ ; CHECK-O0-NEXT: movq (%rdi), %rax ; CHECK-O0-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-O0-NEXT: andl $15, %eax -; CHECK-O0-NEXT: movl %eax, %ecx -; CHECK-O0-NEXT: movq %rcx, (%rdi) +; CHECK-O0-NEXT: # kill: def $rax killed $eax +; CHECK-O0-NEXT: movq %rax, (%rdi) ; CHECK-O0-NEXT: retq ; ; CHECK-O3-LABEL: rmw_fold_and1: Index: test/CodeGen/X86/atomic32.ll =================================================================== --- test/CodeGen/X86/atomic32.ll +++ test/CodeGen/X86/atomic32.ll @@ -69,8 +69,8 @@ ; X64-NEXT: movl %eax, %ecx ; X64-NEXT: andl $5, %ecx ; X64-NEXT: lock cmpxchgl %ecx, {{.*}}(%rip) -; X64-NEXT: sete %dl -; X64-NEXT: testb $1, %dl +; X64-NEXT: sete %cl +; X64-NEXT: testb $1, %cl ; X64-NEXT: movl %eax, %ecx ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill @@ -93,8 +93,8 @@ ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: andl $5, %ecx ; X86-NEXT: lock cmpxchgl %ecx, sc32 -; X86-NEXT: sete %dl -; X86-NEXT: testb $1, %dl +; X86-NEXT: sete %cl +; X86-NEXT: testb $1, %cl ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: movl %eax, (%esp) # 4-byte Spill @@ -123,8 +123,8 @@ ; X64-NEXT: movl %eax, %ecx ; X64-NEXT: orl $5, %ecx ; X64-NEXT: lock cmpxchgl %ecx, {{.*}}(%rip) -; X64-NEXT: sete %dl -; X64-NEXT: testb $1, %dl +; X64-NEXT: sete %cl +; X64-NEXT: testb $1, %cl ; X64-NEXT: movl %eax, %ecx ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill @@ -147,8 +147,8 @@ ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: orl $5, %ecx ; X86-NEXT: lock cmpxchgl %ecx, sc32 -; X86-NEXT: sete %dl -; X86-NEXT: testb $1, %dl +; X86-NEXT: sete %cl +; X86-NEXT: testb $1, %cl ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: movl %eax, (%esp) # 4-byte Spill @@ -177,8 +177,8 @@ ; X64-NEXT: movl %eax, %ecx ; X64-NEXT: xorl $5, %ecx ; X64-NEXT: lock cmpxchgl %ecx, {{.*}}(%rip) -; X64-NEXT: sete %dl -; X64-NEXT: testb $1, %dl +; X64-NEXT: sete %cl +; X64-NEXT: testb $1, %cl ; X64-NEXT: movl %eax, %ecx ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill @@ -201,8 +201,8 @@ ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: xorl $5, %ecx ; X86-NEXT: lock cmpxchgl %ecx, sc32 -; X86-NEXT: sete %dl -; X86-NEXT: testb $1, %dl +; X86-NEXT: sete %cl +; X86-NEXT: testb $1, %cl ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: movl %eax, (%esp) # 4-byte Spill @@ -233,8 +233,8 @@ ; X64-NEXT: andl %edx, %ecx ; X64-NEXT: notl %ecx ; X64-NEXT: lock cmpxchgl %ecx, {{.*}}(%rip) -; X64-NEXT: sete %sil -; X64-NEXT: testb $1, %sil +; X64-NEXT: sete %cl +; X64-NEXT: testb $1, %cl ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: jne 
.LBB5_2 ; X64-NEXT: jmp .LBB5_1 @@ -243,7 +243,6 @@ ; ; X86-LABEL: atomic_fetch_nand32: ; X86: # %bb.0: -; X86-NEXT: pushl %ebx ; X86-NEXT: subl $8, %esp ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl sc32, %ecx @@ -257,14 +256,13 @@ ; X86-NEXT: andl %edx, %ecx ; X86-NEXT: notl %ecx ; X86-NEXT: lock cmpxchgl %ecx, sc32 -; X86-NEXT: sete %bl -; X86-NEXT: testb $1, %bl +; X86-NEXT: sete %cl +; X86-NEXT: testb $1, %cl ; X86-NEXT: movl %eax, (%esp) # 4-byte Spill ; X86-NEXT: jne .LBB5_2 ; X86-NEXT: jmp .LBB5_1 ; X86-NEXT: .LBB5_2: # %atomicrmw.end ; X86-NEXT: addl $8, %esp -; X86-NEXT: popl %ebx ; X86-NEXT: retl %t1 = atomicrmw nand i32* @sc32, i32 %x acquire ret void @@ -284,8 +282,8 @@ ; X64-NEXT: subl %edx, %ecx ; X64-NEXT: cmovgel %eax, %edx ; X64-NEXT: lock cmpxchgl %edx, {{.*}}(%rip) -; X64-NEXT: sete %sil -; X64-NEXT: testb $1, %sil +; X64-NEXT: sete %dl +; X64-NEXT: testb $1, %dl ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: jne .LBB6_2 @@ -295,7 +293,6 @@ ; ; X86-CMOV-LABEL: atomic_fetch_max32: ; X86-CMOV: # %bb.0: -; X86-CMOV-NEXT: pushl %ebx ; X86-CMOV-NEXT: subl $12, %esp ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-CMOV-NEXT: movl sc32, %ecx @@ -309,20 +306,18 @@ ; X86-CMOV-NEXT: subl %edx, %ecx ; X86-CMOV-NEXT: cmovgel %eax, %edx ; X86-CMOV-NEXT: lock cmpxchgl %edx, sc32 -; X86-CMOV-NEXT: sete %bl -; X86-CMOV-NEXT: testb $1, %bl +; X86-CMOV-NEXT: sete %dl +; X86-CMOV-NEXT: testb $1, %dl ; X86-CMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-CMOV-NEXT: movl %ecx, (%esp) # 4-byte Spill ; X86-CMOV-NEXT: jne .LBB6_2 ; X86-CMOV-NEXT: jmp .LBB6_1 ; X86-CMOV-NEXT: .LBB6_2: # %atomicrmw.end ; X86-CMOV-NEXT: addl $12, %esp -; X86-CMOV-NEXT: popl %ebx ; X86-CMOV-NEXT: retl ; ; X86-NOCMOV-LABEL: atomic_fetch_max32: ; X86-NOCMOV: # %bb.0: -; X86-NOCMOV-NEXT: pushl %ebx ; X86-NOCMOV-NEXT: pushl %esi ; X86-NOCMOV-NEXT: subl $20, %esp ; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -351,15 +346,14 @@ ; X86-NOCMOV-NEXT: movl %ecx, %eax ; X86-NOCMOV-NEXT: movl (%esp), %edx # 4-byte Reload ; X86-NOCMOV-NEXT: lock cmpxchgl %edx, sc32 -; X86-NOCMOV-NEXT: sete %bl -; X86-NOCMOV-NEXT: testb $1, %bl +; X86-NOCMOV-NEXT: sete %dl +; X86-NOCMOV-NEXT: testb $1, %dl ; X86-NOCMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NOCMOV-NEXT: jne .LBB6_2 ; X86-NOCMOV-NEXT: jmp .LBB6_1 ; X86-NOCMOV-NEXT: .LBB6_2: # %atomicrmw.end ; X86-NOCMOV-NEXT: addl $20, %esp ; X86-NOCMOV-NEXT: popl %esi -; X86-NOCMOV-NEXT: popl %ebx ; X86-NOCMOV-NEXT: retl %t1 = atomicrmw max i32* @sc32, i32 %x acquire ret void @@ -379,8 +373,8 @@ ; X64-NEXT: subl %edx, %ecx ; X64-NEXT: cmovlel %eax, %edx ; X64-NEXT: lock cmpxchgl %edx, {{.*}}(%rip) -; X64-NEXT: sete %sil -; X64-NEXT: testb $1, %sil +; X64-NEXT: sete %dl +; X64-NEXT: testb $1, %dl ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: jne .LBB7_2 @@ -390,7 +384,6 @@ ; ; X86-CMOV-LABEL: atomic_fetch_min32: ; X86-CMOV: # %bb.0: -; X86-CMOV-NEXT: pushl %ebx ; X86-CMOV-NEXT: subl $12, %esp ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-CMOV-NEXT: movl sc32, %ecx @@ -404,20 +397,18 @@ ; X86-CMOV-NEXT: subl %edx, %ecx ; X86-CMOV-NEXT: cmovlel %eax, %edx ; X86-CMOV-NEXT: lock cmpxchgl %edx, sc32 -; X86-CMOV-NEXT: sete %bl -; X86-CMOV-NEXT: testb $1, %bl +; X86-CMOV-NEXT: sete %dl +; X86-CMOV-NEXT: testb $1, %dl ; X86-CMOV-NEXT: movl %eax, 
{{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-CMOV-NEXT: movl %ecx, (%esp) # 4-byte Spill ; X86-CMOV-NEXT: jne .LBB7_2 ; X86-CMOV-NEXT: jmp .LBB7_1 ; X86-CMOV-NEXT: .LBB7_2: # %atomicrmw.end ; X86-CMOV-NEXT: addl $12, %esp -; X86-CMOV-NEXT: popl %ebx ; X86-CMOV-NEXT: retl ; ; X86-NOCMOV-LABEL: atomic_fetch_min32: ; X86-NOCMOV: # %bb.0: -; X86-NOCMOV-NEXT: pushl %ebx ; X86-NOCMOV-NEXT: pushl %esi ; X86-NOCMOV-NEXT: subl $20, %esp ; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -446,15 +437,14 @@ ; X86-NOCMOV-NEXT: movl %ecx, %eax ; X86-NOCMOV-NEXT: movl (%esp), %edx # 4-byte Reload ; X86-NOCMOV-NEXT: lock cmpxchgl %edx, sc32 -; X86-NOCMOV-NEXT: sete %bl -; X86-NOCMOV-NEXT: testb $1, %bl +; X86-NOCMOV-NEXT: sete %dl +; X86-NOCMOV-NEXT: testb $1, %dl ; X86-NOCMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NOCMOV-NEXT: jne .LBB7_2 ; X86-NOCMOV-NEXT: jmp .LBB7_1 ; X86-NOCMOV-NEXT: .LBB7_2: # %atomicrmw.end ; X86-NOCMOV-NEXT: addl $20, %esp ; X86-NOCMOV-NEXT: popl %esi -; X86-NOCMOV-NEXT: popl %ebx ; X86-NOCMOV-NEXT: retl %t1 = atomicrmw min i32* @sc32, i32 %x acquire ret void @@ -474,8 +464,8 @@ ; X64-NEXT: subl %edx, %ecx ; X64-NEXT: cmoval %eax, %edx ; X64-NEXT: lock cmpxchgl %edx, {{.*}}(%rip) -; X64-NEXT: sete %sil -; X64-NEXT: testb $1, %sil +; X64-NEXT: sete %dl +; X64-NEXT: testb $1, %dl ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: jne .LBB8_2 @@ -485,7 +475,6 @@ ; ; X86-CMOV-LABEL: atomic_fetch_umax32: ; X86-CMOV: # %bb.0: -; X86-CMOV-NEXT: pushl %ebx ; X86-CMOV-NEXT: subl $12, %esp ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-CMOV-NEXT: movl sc32, %ecx @@ -499,20 +488,18 @@ ; X86-CMOV-NEXT: subl %edx, %ecx ; X86-CMOV-NEXT: cmoval %eax, %edx ; X86-CMOV-NEXT: lock cmpxchgl %edx, sc32 -; X86-CMOV-NEXT: sete %bl -; X86-CMOV-NEXT: testb $1, %bl +; X86-CMOV-NEXT: sete %dl +; X86-CMOV-NEXT: testb $1, %dl ; X86-CMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-CMOV-NEXT: movl %ecx, (%esp) # 4-byte Spill ; X86-CMOV-NEXT: jne .LBB8_2 ; X86-CMOV-NEXT: jmp .LBB8_1 ; X86-CMOV-NEXT: .LBB8_2: # %atomicrmw.end ; X86-CMOV-NEXT: addl $12, %esp -; X86-CMOV-NEXT: popl %ebx ; X86-CMOV-NEXT: retl ; ; X86-NOCMOV-LABEL: atomic_fetch_umax32: ; X86-NOCMOV: # %bb.0: -; X86-NOCMOV-NEXT: pushl %ebx ; X86-NOCMOV-NEXT: pushl %esi ; X86-NOCMOV-NEXT: subl $20, %esp ; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -541,15 +528,14 @@ ; X86-NOCMOV-NEXT: movl %ecx, %eax ; X86-NOCMOV-NEXT: movl (%esp), %edx # 4-byte Reload ; X86-NOCMOV-NEXT: lock cmpxchgl %edx, sc32 -; X86-NOCMOV-NEXT: sete %bl -; X86-NOCMOV-NEXT: testb $1, %bl +; X86-NOCMOV-NEXT: sete %dl +; X86-NOCMOV-NEXT: testb $1, %dl ; X86-NOCMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NOCMOV-NEXT: jne .LBB8_2 ; X86-NOCMOV-NEXT: jmp .LBB8_1 ; X86-NOCMOV-NEXT: .LBB8_2: # %atomicrmw.end ; X86-NOCMOV-NEXT: addl $20, %esp ; X86-NOCMOV-NEXT: popl %esi -; X86-NOCMOV-NEXT: popl %ebx ; X86-NOCMOV-NEXT: retl %t1 = atomicrmw umax i32* @sc32, i32 %x acquire ret void @@ -569,8 +555,8 @@ ; X64-NEXT: subl %edx, %ecx ; X64-NEXT: cmovbel %eax, %edx ; X64-NEXT: lock cmpxchgl %edx, {{.*}}(%rip) -; X64-NEXT: sete %sil -; X64-NEXT: testb $1, %sil +; X64-NEXT: sete %dl +; X64-NEXT: testb $1, %dl ; X64-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; X64-NEXT: jne .LBB9_2 @@ -580,7 +566,6 @@ ; ; X86-CMOV-LABEL: atomic_fetch_umin32: ; X86-CMOV: # %bb.0: -; 
X86-CMOV-NEXT: pushl %ebx
 ; X86-CMOV-NEXT: subl $12, %esp
 ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-CMOV-NEXT: movl sc32, %ecx
@@ -594,20 +579,18 @@
 ; X86-CMOV-NEXT: subl %edx, %ecx
 ; X86-CMOV-NEXT: cmovbel %eax, %edx
 ; X86-CMOV-NEXT: lock cmpxchgl %edx, sc32
-; X86-CMOV-NEXT: sete %bl
-; X86-CMOV-NEXT: testb $1, %bl
+; X86-CMOV-NEXT: sete %dl
+; X86-CMOV-NEXT: testb $1, %dl
 ; X86-CMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-CMOV-NEXT: movl %ecx, (%esp) # 4-byte Spill
 ; X86-CMOV-NEXT: jne .LBB9_2
 ; X86-CMOV-NEXT: jmp .LBB9_1
 ; X86-CMOV-NEXT: .LBB9_2: # %atomicrmw.end
 ; X86-CMOV-NEXT: addl $12, %esp
-; X86-CMOV-NEXT: popl %ebx
 ; X86-CMOV-NEXT: retl
 ;
 ; X86-NOCMOV-LABEL: atomic_fetch_umin32:
 ; X86-NOCMOV: # %bb.0:
-; X86-NOCMOV-NEXT: pushl %ebx
 ; X86-NOCMOV-NEXT: pushl %esi
 ; X86-NOCMOV-NEXT: subl $20, %esp
 ; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -636,15 +619,14 @@
 ; X86-NOCMOV-NEXT: movl %ecx, %eax
 ; X86-NOCMOV-NEXT: movl (%esp), %edx # 4-byte Reload
 ; X86-NOCMOV-NEXT: lock cmpxchgl %edx, sc32
-; X86-NOCMOV-NEXT: sete %bl
-; X86-NOCMOV-NEXT: testb $1, %bl
+; X86-NOCMOV-NEXT: sete %dl
+; X86-NOCMOV-NEXT: testb $1, %dl
 ; X86-NOCMOV-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NOCMOV-NEXT: jne .LBB9_2
 ; X86-NOCMOV-NEXT: jmp .LBB9_1
 ; X86-NOCMOV-NEXT: .LBB9_2: # %atomicrmw.end
 ; X86-NOCMOV-NEXT: addl $20, %esp
 ; X86-NOCMOV-NEXT: popl %esi
-; X86-NOCMOV-NEXT: popl %ebx
 ; X86-NOCMOV-NEXT: retl
 %t1 = atomicrmw umin i32* @sc32, i32 %x acquire
 ret void
Index: test/CodeGen/X86/atomic64.ll
===================================================================
--- test/CodeGen/X86/atomic64.ll
+++ test/CodeGen/X86/atomic64.ll
@@ -137,12 +137,12 @@
 ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
 ; X64-NEXT: movl %eax, %ecx
 ; X64-NEXT: andl $5, %ecx
-; X64-NEXT: movl %ecx, %edx
-; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
-; X64-NEXT: sete %sil
-; X64-NEXT: testb $1, %sil
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: # kill: def $rcx killed $ecx
+; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
+; X64-NEXT: sete %cl
+; X64-NEXT: testb $1, %cl
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: jne .LBB2_2
 ; X64-NEXT: jmp .LBB2_1
@@ -202,8 +202,8 @@
 ; X64-NEXT: movq %rax, %rcx
 ; X64-NEXT: orq $5, %rcx
 ; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
-; X64-NEXT: sete %dl
-; X64-NEXT: testb $1, %dl
+; X64-NEXT: sete %cl
+; X64-NEXT: testb $1, %cl
 ; X64-NEXT: movq %rax, %rcx
 ; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
@@ -265,8 +265,8 @@
 ; X64-NEXT: movq %rax, %rcx
 ; X64-NEXT: xorq $5, %rcx
 ; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
-; X64-NEXT: sete %dl
-; X64-NEXT: testb $1, %dl
+; X64-NEXT: sete %cl
+; X64-NEXT: testb $1, %cl
 ; X64-NEXT: movq %rax, %rcx
 ; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
@@ -330,8 +330,8 @@
 ; X64-NEXT: andq %rdx, %rcx
 ; X64-NEXT: notq %rcx
 ; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
-; X64-NEXT: sete %sil
-; X64-NEXT: testb $1, %sil
+; X64-NEXT: sete %cl
+; X64-NEXT: testb $1, %cl
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: jne .LBB5_2
 ; X64-NEXT: jmp .LBB5_1
@@ -373,8 +373,8 @@
 ; X64-NEXT: subq %rdx, %rcx
 ; X64-NEXT: cmovgeq %rax, %rdx
 ; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
-; X64-NEXT: sete %sil
-; X64-NEXT: testb $1, %sil
+; X64-NEXT: sete %dl
+; X64-NEXT: testb $1, %dl
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: jne .LBB6_2
@@ -473,8 +473,8 @@
 ; X64-NEXT: subq %rdx, %rcx
 ; X64-NEXT: cmovleq %rax, %rdx
 ; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
-; X64-NEXT: sete %sil
-; X64-NEXT: testb $1, %sil
+; X64-NEXT: sete %dl
+; X64-NEXT: testb $1, %dl
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: jne .LBB7_2
@@ -571,8 +571,8 @@
 ; X64-NEXT: subq %rdx, %rcx
 ; X64-NEXT: cmovaq %rax, %rdx
 ; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
-; X64-NEXT: sete %sil
-; X64-NEXT: testb $1, %sil
+; X64-NEXT: sete %dl
+; X64-NEXT: testb $1, %dl
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: jne .LBB8_2
@@ -669,8 +669,8 @@
 ; X64-NEXT: subq %rdx, %rcx
 ; X64-NEXT: cmovbeq %rax, %rdx
 ; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
-; X64-NEXT: sete %sil
-; X64-NEXT: testb $1, %sil
+; X64-NEXT: sete %dl
+; X64-NEXT: testb $1, %dl
 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT: jne .LBB9_2
Index: test/CodeGen/X86/avx-load-store.ll
===================================================================
--- test/CodeGen/X86/avx-load-store.ll
+++ test/CodeGen/X86/avx-load-store.ll
@@ -175,8 +175,8 @@
 ; CHECK_O0: # %bb.0:
 ; CHECK_O0-NEXT: # implicit-def: $ymm2
 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
-; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
-; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
 ; CHECK_O0-NEXT: vzeroupper
 ; CHECK_O0-NEXT: retq
 %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32>
@@ -197,8 +197,8 @@
 ; CHECK_O0: # %bb.0:
 ; CHECK_O0-NEXT: # implicit-def: $ymm2
 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
-; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
-; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
 ; CHECK_O0-NEXT: vzeroupper
 ; CHECK_O0-NEXT: retq
 %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32>
@@ -241,10 +241,10 @@
 ; CHECK_O0-NEXT: movl $-1, %eax
 ; CHECK_O0-NEXT: vmovd %eax, %xmm0
 ; CHECK_O0-NEXT: vmovdqa %xmm0, %xmm0
-; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
-; CHECK_O0-NEXT: # implicit-def: $rcx
-; CHECK_O0-NEXT: # implicit-def: $ymm2
-; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
+; CHECK_O0-NEXT: # kill: def $ymm0 killed $xmm0
+; CHECK_O0-NEXT: # implicit-def: $rax
+; CHECK_O0-NEXT: # implicit-def: $ymm1
+; CHECK_O0-NEXT: vmaskmovps %ymm1, %ymm0, (%rax)
 ; CHECK_O0-NEXT: .LBB9_4: # %cif_mixed_test_any_check
 allocas:
 br i1 undef, label %cif_mask_all, label %cif_mask_mixed
@@ -278,8 +278,8 @@
 ; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
 ; CHECK_O0-NEXT: # implicit-def: $ymm2
 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
-; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
-; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
 ; CHECK_O0-NEXT: vzeroupper
 ; CHECK_O0-NEXT: retq
 %b = load <8 x i32>, <8 x i32>* %bp, align 1
@@ -323,8 +323,8 @@
 ; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
 ; CHECK_O0-NEXT: # implicit-def: $ymm2
 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
-; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
-; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
 ; CHECK_O0-NEXT: vzeroupper
 ; CHECK_O0-NEXT: retq
 %b = load <4 x i64>, <4 x i64>* %bp, align 16
Index: test/CodeGen/X86/avx512-mask-zext-bugfix.ll
===================================================================
--- test/CodeGen/X86/avx512-mask-zext-bugfix.ll
+++ test/CodeGen/X86/avx512-mask-zext-bugfix.ll
@@ -40,22 +40,20 @@
 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
 ; CHECK-NEXT: vpmovd2m %xmm0, %k0
 ; CHECK-NEXT: kmovq %k0, %k1
-; CHECK-NEXT: kmovd %k0, %esi
-; CHECK-NEXT: ## kill: def $sil killed $sil killed $esi
-; CHECK-NEXT: movzbl %sil, %edi
-; CHECK-NEXT: ## kill: def $di killed $di killed $edi
-; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
-; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
-; CHECK-NEXT: movq %rcx, %rdi
-; CHECK-NEXT: movl $4, %r8d
-; CHECK-NEXT: movl %r8d, %esi
-; CHECK-NEXT: movl %r8d, %edx
+; CHECK-NEXT: kmovd %k0, %ecx
+; CHECK-NEXT: ## kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT: movzbl %cl, %ecx
+; CHECK-NEXT: ## kill: def $cx killed $cx killed $ecx
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi ## 8-byte Reload
+; CHECK-NEXT: movl $4, %edx
+; CHECK-NEXT: movl %edx, %esi
 ; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
 ; CHECK-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
+; CHECK-NEXT: movw %cx, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
 ; CHECK-NEXT: callq _calc_expected_mask_val
 ; CHECK-NEXT: ## kill: def $ax killed $ax killed $rax
-; CHECK-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %r9w ## 2-byte Reload
-; CHECK-NEXT: movzwl %r9w, %edi
+; CHECK-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx ## 2-byte Reload
+; CHECK-NEXT: movzwl %cx, %edi
 ; CHECK-NEXT: movzwl %ax, %esi
 ; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx ## 8-byte Reload
 ; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
Index: test/CodeGen/X86/crash-O0.ll
===================================================================
--- test/CodeGen/X86/crash-O0.ll
+++ test/CodeGen/X86/crash-O0.ll
@@ -79,11 +79,12 @@
 ; CHECK-NEXT: movq %rsp, %rbp
 ; CHECK-NEXT: .cfi_def_cfa_register %rbp
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: movq %rcx, %rax
+; CHECK-NEXT: ## kill: def $rax killed $eax
+; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
 ; CHECK-NEXT: cqto
-; CHECK-NEXT: movslq %edi, %rsi
-; CHECK-NEXT: idivq (%rcx,%rsi,8)
+; CHECK-NEXT: movslq %edi, %rcx
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi ## 8-byte Reload
+; CHECK-NEXT: idivq (%rsi,%rcx,8)
 ; CHECK-NEXT: popq %rbp
 ; CHECK-NEXT: retq
 %gep = getelementptr i64, i64* null, i32 %V
Index: test/CodeGen/X86/extend-set-cc-uses-dbg.ll
===================================================================
--- test/CodeGen/X86/extend-set-cc-uses-dbg.ll
+++ test/CodeGen/X86/extend-set-cc-uses-dbg.ll
@@ -7,8 +7,8 @@
 bb:
 %tmp = load i32, i32* %p, align 4, !dbg !7
 ; CHECK: $eax = MOV32rm killed {{.*}} $rdi, {{.*}} debug-location !7 :: (load 4 from %ir.p)
- ; CHECK-NEXT: $ecx = MOV32rr killed $eax, implicit-def $rcx, debug-location !7
- ; CHECK-NEXT: $rdx = MOV64rr $rcx, debug-location !7
+ ; CHECK-NEXT: $rax = KILL killed renamable $eax, debug-location !7
+ ; CHECK-NEXT: $rcx = MOV64rr $rax, debug-location !7
 switch i32 %tmp, label %bb7 [
 i32 0, label %bb1
Index: test/CodeGen/X86/fast-isel-nontemporal.ll
===================================================================
--- test/CodeGen/X86/fast-isel-nontemporal.ll
+++ test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -1041,11 +1041,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: $ymm1
-; AVX1-NEXT: vmovaps %xmm2, %xmm1
-; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: # implicit-def: $ymm2
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt16xfloat:
@@ -1095,11 +1095,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: $ymm1
-; AVX1-NEXT: vmovaps %xmm2, %xmm1
-; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: # implicit-def: $ymm2
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt8xdouble:
@@ -1149,11 +1149,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: $ymm1
-; AVX1-NEXT: vmovaps %xmm2, %xmm1
-; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: # implicit-def: $ymm2
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt64xi8:
@@ -1215,11 +1215,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: $ymm1
-; AVX1-NEXT: vmovaps %xmm2, %xmm1
-; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: # implicit-def: $ymm2
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt32xi16:
@@ -1281,11 +1281,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: $ymm1
-; AVX1-NEXT: vmovaps %xmm2, %xmm1
-; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: # implicit-def: $ymm2
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt16xi32:
@@ -1335,11 +1335,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: $ymm1
-; AVX1-NEXT: vmovaps %xmm2, %xmm1
-; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: # implicit-def: $ymm2
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt8xi64:
Index: test/CodeGen/X86/pr27591.ll
===================================================================
--- test/CodeGen/X86/pr27591.ll
+++ test/CodeGen/X86/pr27591.ll
@@ -9,9 +9,9 @@
 ; CHECK-NEXT: pushq %rax
 ; CHECK-NEXT: cmpl $0, %edi
 ; CHECK-NEXT: setne %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: andl $1, %ecx
-; CHECK-NEXT: movl %ecx, %edi
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: movl %eax, %edi
 ; CHECK-NEXT: callq callee1
 ; CHECK-NEXT: popq %rax
 ; CHECK-NEXT: retq
@@ -27,10 +27,10 @@
 ; CHECK-NEXT: pushq %rax
 ; CHECK-NEXT: cmpl $0, %edi
 ; CHECK-NEXT: setne %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: andl $1, %ecx
-; CHECK-NEXT: negl %ecx
-; CHECK-NEXT: movl %ecx, %edi
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: negl %eax
+; CHECK-NEXT: movl %eax, %edi
 ; CHECK-NEXT: callq callee2
 ; CHECK-NEXT: popq %rax
 ; CHECK-NEXT: retq
Index: test/CodeGen/X86/pr30430.ll
===================================================================
--- test/CodeGen/X86/pr30430.ll
+++ test/CodeGen/X86/pr30430.ll
@@ -75,28 +75,28 @@
 ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; CHECK-NEXT: # implicit-def: $ymm2
 ; CHECK-NEXT: vmovaps %xmm1, %xmm2
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2
-; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; CHECK-NEXT: # implicit-def: $ymm3
-; CHECK-NEXT: vmovaps %xmm1, %xmm3
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm3
-; CHECK-NEXT: # implicit-def: $zmm24
-; CHECK-NEXT: vmovaps %zmm3, %zmm24
-; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm24, %zmm24
-; CHECK-NEXT: vmovaps %zmm24, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovaps %xmm2, %xmm3
+; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; CHECK-NEXT: # implicit-def: $zmm2
+; CHECK-NEXT: vmovaps %ymm1, %ymm2
+; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
+; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0
 ; CHECK-NEXT: movq %rbp, %rsp
 ; CHECK-NEXT: popq %rbp
Index: test/CodeGen/X86/pr30813.ll
===================================================================
--- test/CodeGen/X86/pr30813.ll
+++ test/CodeGen/X86/pr30813.ll
@@ -1,9 +1,8 @@
 ; RUN: llc -mtriple=x86_64-linux-gnu -O0 %s -o - | FileCheck %s
 ; CHECK: patatino:
 ; CHECK: .cfi_startproc
-; CHECK: movzwl (%rax), [[REG0:%e[abcd]x]]
-; CHECK: movl [[REG0]], %e[[REG1C:[abcd]]]x
-; CHECK: movq %r[[REG1C]]x, ({{%r[abcd]x}})
+; CHECK: movzwl (%rax), %e[[REG0:[abcd]x]]
+; CHECK: movq %r[[REG0]], ({{%r[abcd]x}})
 ; CHECK: retq

 define void @patatino() {
Index: test/CodeGen/X86/pr32241.ll
===================================================================
--- test/CodeGen/X86/pr32241.ll
+++ test/CodeGen/X86/pr32241.ll
@@ -23,14 +23,14 @@
 ; CHECK-NEXT: .LBB0_2: # %lor.end
 ; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
 ; CHECK-NEXT: andb $1, %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; CHECK-NEXT: cmpl %ecx, %edx
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; CHECK-NEXT: cmpl %eax, %ecx
 ; CHECK-NEXT: setl %al
 ; CHECK-NEXT: andb $1, %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: xorl $-1, %ecx
-; CHECK-NEXT: cmpl $0, %ecx
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: xorl $-1, %eax
+; CHECK-NEXT: cmpl $0, %eax
 ; CHECK-NEXT: movb $1, %al
 ; CHECK-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
 ; CHECK-NEXT: jne .LBB0_4
@@ -42,9 +42,9 @@
 ; CHECK-NEXT: .LBB0_4: # %lor.end5
 ; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
 ; CHECK-NEXT: andb $1, %al
-; CHECK-NEXT: movzbl %al, %ecx
-; CHECK-NEXT: # kill: def $cx killed $cx killed $ecx
-; CHECK-NEXT: movw %cx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: movw %ax, {{[0-9]+}}(%esp)
 ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT: addl $16, %esp
 ; CHECK-NEXT: .cfi_def_cfa_offset 4
Index: test/CodeGen/X86/pr32284.ll
===================================================================
--- test/CodeGen/X86/pr32284.ll
+++ test/CodeGen/X86/pr32284.ll
@@ -10,28 +10,28 @@
 ; X86-O0-LABEL: foo:
 ; X86-O0: # %bb.0: # %entry
 ; X86-O0-NEXT: xorl %eax, %eax
-; X86-O0-NEXT: movl %eax, %ecx
-; X86-O0-NEXT: xorl %eax, %eax
+; X86-O0-NEXT: # kill: def $rax killed $eax
+; X86-O0-NEXT: xorl %ecx, %ecx
 ; X86-O0-NEXT: movzbl c, %edx
-; X86-O0-NEXT: subl %edx, %eax
-; X86-O0-NEXT: movslq %eax, %rsi
-; X86-O0-NEXT: subq %rsi, %rcx
-; X86-O0-NEXT: # kill: def $cl killed $cl killed $rcx
-; X86-O0-NEXT: cmpb $0, %cl
-; X86-O0-NEXT: setne %cl
-; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; X86-O0-NEXT: subl %edx, %ecx
+; X86-O0-NEXT: movslq %ecx, %rcx
+; X86-O0-NEXT: subq %rcx, %rax
+; X86-O0-NEXT: # kill: def $al killed $al killed $rax
+; X86-O0-NEXT: cmpb $0, %al
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
 ; X86-O0-NEXT: cmpb $0, c
-; X86-O0-NEXT: setne %cl
-; X86-O0-NEXT: xorb $-1, %cl
-; X86-O0-NEXT: xorb $-1, %cl
-; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movzbl %cl, %eax
-; X86-O0-NEXT: movzbl c, %edx
-; X86-O0-NEXT: cmpl %edx, %eax
-; X86-O0-NEXT: setle %cl
-; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movzbl %cl, %eax
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: xorb $-1, %al
+; X86-O0-NEXT: xorb $-1, %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
+; X86-O0-NEXT: movzbl c, %ecx
+; X86-O0-NEXT: cmpl %ecx, %eax
+; X86-O0-NEXT: setle %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
 ; X86-O0-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
 ; X86-O0-NEXT: retq
 ;
@@ -63,13 +63,13 @@
 ; 686-O0-NEXT: xorb $-1, %al
 ; 686-O0-NEXT: xorb $-1, %al
 ; 686-O0-NEXT: andb $1, %al
-; 686-O0-NEXT: movzbl %al, %ecx
-; 686-O0-NEXT: movzbl c, %edx
-; 686-O0-NEXT: cmpl %edx, %ecx
+; 686-O0-NEXT: movzbl %al, %eax
+; 686-O0-NEXT: movzbl c, %ecx
+; 686-O0-NEXT: cmpl %ecx, %eax
 ; 686-O0-NEXT: setle %al
 ; 686-O0-NEXT: andb $1, %al
-; 686-O0-NEXT: movzbl %al, %ecx
-; 686-O0-NEXT: movl %ecx, (%esp)
+; 686-O0-NEXT: movzbl %al, %eax
+; 686-O0-NEXT: movl %eax, (%esp)
 ; 686-O0-NEXT: addl $8, %esp
 ; 686-O0-NEXT: .cfi_def_cfa_offset 4
 ; 686-O0-NEXT: retl
@@ -126,33 +126,33 @@
 ; X86-O0-NEXT: movabsq $8381627093, %rcx # imm = 0x1F3957AD5
 ; X86-O0-NEXT: addq %rcx, %rax
 ; X86-O0-NEXT: cmpq $0, %rax
-; X86-O0-NEXT: setne %dl
-; X86-O0-NEXT: andb $1, %dl
-; X86-O0-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
-; X86-O0-NEXT: movl var_5, %esi
-; X86-O0-NEXT: xorl $-1, %esi
-; X86-O0-NEXT: cmpl $0, %esi
-; X86-O0-NEXT: setne %dl
-; X86-O0-NEXT: xorb $-1, %dl
-; X86-O0-NEXT: andb $1, %dl
-; X86-O0-NEXT: movzbl %dl, %esi
-; X86-O0-NEXT: movl %esi, %eax
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; X86-O0-NEXT: movl var_5, %eax
+; X86-O0-NEXT: xorl $-1, %eax
+; X86-O0-NEXT: cmpl $0, %eax
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: xorb $-1, %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
+; X86-O0-NEXT: # kill: def $rax killed $eax
 ; X86-O0-NEXT: movslq var_5, %rcx
 ; X86-O0-NEXT: addq $7093, %rcx # imm = 0x1BB5
 ; X86-O0-NEXT: cmpq %rcx, %rax
-; X86-O0-NEXT: setg %dl
-; X86-O0-NEXT: andb $1, %dl
-; X86-O0-NEXT: movzbl %dl, %esi
-; X86-O0-NEXT: movl %esi, %eax
+; X86-O0-NEXT: setg %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
+; X86-O0-NEXT: # kill: def $rax killed $eax
 ; X86-O0-NEXT: movq %rax, var_57
-; X86-O0-NEXT: movl var_5, %esi
-; X86-O0-NEXT: xorl $-1, %esi
-; X86-O0-NEXT: cmpl $0, %esi
-; X86-O0-NEXT: setne %dl
-; X86-O0-NEXT: xorb $-1, %dl
-; X86-O0-NEXT: andb $1, %dl
-; X86-O0-NEXT: movzbl %dl, %esi
-; X86-O0-NEXT: movl %esi, %eax
+; X86-O0-NEXT: movl var_5, %eax
+; X86-O0-NEXT: xorl $-1, %eax
+; X86-O0-NEXT: cmpl $0, %eax
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: xorb $-1, %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
+; X86-O0-NEXT: # kill: def $rax killed $eax
 ; X86-O0-NEXT: movq %rax, _ZN8struct_210member_2_0E
 ; X86-O0-NEXT: retq
 ;
@@ -178,20 +178,17 @@
 ;
 ; 686-O0-LABEL: f1:
 ; 686-O0: # %bb.0: # %entry
-; 686-O0-NEXT: pushl %ebp
-; 686-O0-NEXT: .cfi_def_cfa_offset 8
 ; 686-O0-NEXT: pushl %ebx
-; 686-O0-NEXT: .cfi_def_cfa_offset 12
+; 686-O0-NEXT: .cfi_def_cfa_offset 8
 ; 686-O0-NEXT: pushl %edi
-; 686-O0-NEXT: .cfi_def_cfa_offset 16
+; 686-O0-NEXT: .cfi_def_cfa_offset 12
 ; 686-O0-NEXT: pushl %esi
-; 686-O0-NEXT: .cfi_def_cfa_offset 20
+; 686-O0-NEXT: .cfi_def_cfa_offset 16
 ; 686-O0-NEXT: subl $1, %esp
-; 686-O0-NEXT: .cfi_def_cfa_offset 21
-; 686-O0-NEXT: .cfi_offset %esi, -20
-; 686-O0-NEXT: .cfi_offset %edi, -16
-; 686-O0-NEXT: .cfi_offset %ebx, -12
-; 686-O0-NEXT: .cfi_offset %ebp, -8
+; 686-O0-NEXT: .cfi_def_cfa_offset 17
+; 686-O0-NEXT: .cfi_offset %esi, -16
+; 686-O0-NEXT: .cfi_offset %edi, -12
+; 686-O0-NEXT: .cfi_offset %ebx, -8
 ; 686-O0-NEXT: movl var_5, %eax
 ; 686-O0-NEXT: movl %eax, %ecx
 ; 686-O0-NEXT: sarl $31, %ecx
@@ -217,18 +214,16 @@
 ; 686-O0-NEXT: movl var_5, %edi
 ; 686-O0-NEXT: subl $-1, %edi
 ; 686-O0-NEXT: sete %bl
-; 686-O0-NEXT: movzbl %bl, %ebp
-; 686-O0-NEXT: movl %ebp, _ZN8struct_210member_2_0E
+; 686-O0-NEXT: movzbl %bl, %ebx
+; 686-O0-NEXT: movl %ebx, _ZN8struct_210member_2_0E
 ; 686-O0-NEXT: movl $0, _ZN8struct_210member_2_0E+4
 ; 686-O0-NEXT: addl $1, %esp
-; 686-O0-NEXT: .cfi_def_cfa_offset 20
-; 686-O0-NEXT: popl %esi
 ; 686-O0-NEXT: .cfi_def_cfa_offset 16
-; 686-O0-NEXT: popl %edi
+; 686-O0-NEXT: popl %esi
 ; 686-O0-NEXT: .cfi_def_cfa_offset 12
-; 686-O0-NEXT: popl %ebx
+; 686-O0-NEXT: popl %edi
 ; 686-O0-NEXT: .cfi_def_cfa_offset 8
-; 686-O0-NEXT: popl %ebp
+; 686-O0-NEXT: popl %ebx
 ; 686-O0-NEXT: .cfi_def_cfa_offset 4
 ; 686-O0-NEXT: retl
 ;
@@ -310,25 +305,25 @@
 ; X86-O0-NEXT: setne %cl
 ; X86-O0-NEXT: xorb $-1, %cl
 ; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movzbl %cl, %edx
-; X86-O0-NEXT: xorl %edx, %eax
+; X86-O0-NEXT: movzbl %cl, %ecx
+; X86-O0-NEXT: xorl %ecx, %eax
 ; X86-O0-NEXT: # kill: def $ax killed $ax killed $eax
 ; X86-O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; X86-O0-NEXT: movzbl var_7, %edx
-; X86-O0-NEXT: # kill: def $dx killed $dx killed $edx
-; X86-O0-NEXT: cmpw $0, %dx
-; X86-O0-NEXT: setne %cl
-; X86-O0-NEXT: xorb $-1, %cl
-; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movzbl %cl, %esi
-; X86-O0-NEXT: movzbl var_7, %edi
-; X86-O0-NEXT: cmpl %edi, %esi
-; X86-O0-NEXT: sete %cl
-; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movzbl %cl, %esi
-; X86-O0-NEXT: # kill: def $si killed $si killed $esi
-; X86-O0-NEXT: # implicit-def: $r8
-; X86-O0-NEXT: movw %si, (%r8)
+; X86-O0-NEXT: movzbl var_7, %eax
+; X86-O0-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-O0-NEXT: cmpw $0, %ax
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: xorb $-1, %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
+; X86-O0-NEXT: movzbl var_7, %ecx
+; X86-O0-NEXT: cmpl %ecx, %eax
+; X86-O0-NEXT: sete %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %eax
+; X86-O0-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-O0-NEXT: # implicit-def: $rcx
+; X86-O0-NEXT: movw %ax, (%rcx)
 ; X86-O0-NEXT: retq
 ;
 ; X64-LABEL: f2:
@@ -350,43 +345,33 @@
 ;
 ; 686-O0-LABEL: f2:
 ; 686-O0: # %bb.0: # %entry
-; 686-O0-NEXT: pushl %edi
-; 686-O0-NEXT: .cfi_def_cfa_offset 8
-; 686-O0-NEXT: pushl %esi
-; 686-O0-NEXT: .cfi_def_cfa_offset 12
 ; 686-O0-NEXT: subl $2, %esp
-; 686-O0-NEXT: .cfi_def_cfa_offset 14
-; 686-O0-NEXT: .cfi_offset %esi, -12
-; 686-O0-NEXT: .cfi_offset %edi, -8
+; 686-O0-NEXT: .cfi_def_cfa_offset 6
 ; 686-O0-NEXT: movzbl var_7, %eax
 ; 686-O0-NEXT: cmpb $0, var_7
 ; 686-O0-NEXT: setne %cl
 ; 686-O0-NEXT: xorb $-1, %cl
 ; 686-O0-NEXT: andb $1, %cl
-; 686-O0-NEXT: movzbl %cl, %edx
-; 686-O0-NEXT: xorl %edx, %eax
+; 686-O0-NEXT: movzbl %cl, %ecx
+; 686-O0-NEXT: xorl %ecx, %eax
 ; 686-O0-NEXT: # kill: def $ax killed $ax killed $eax
 ; 686-O0-NEXT: movw %ax, (%esp)
-; 686-O0-NEXT: movzbl var_7, %edx
-; 686-O0-NEXT: # kill: def $dx killed $dx killed $edx
-; 686-O0-NEXT: cmpw $0, %dx
-; 686-O0-NEXT: setne %cl
-; 686-O0-NEXT: xorb $-1, %cl
-; 686-O0-NEXT: andb $1, %cl
-; 686-O0-NEXT: movzbl %cl, %esi
-; 686-O0-NEXT: movzbl var_7, %edi
-; 686-O0-NEXT: cmpl %edi, %esi
-; 686-O0-NEXT: sete %cl
-; 686-O0-NEXT: andb $1, %cl
-; 686-O0-NEXT: movzbl %cl, %esi
-; 686-O0-NEXT: # kill: def $si killed $si killed $esi
-; 686-O0-NEXT: # implicit-def: $edi
-; 686-O0-NEXT: movw %si, (%edi)
+; 686-O0-NEXT: movzbl var_7, %eax
+; 686-O0-NEXT: # kill: def $ax killed $ax killed $eax
+; 686-O0-NEXT: cmpw $0, %ax
+; 686-O0-NEXT: setne %al
+; 686-O0-NEXT: xorb $-1, %al
+; 686-O0-NEXT: andb $1, %al
+; 686-O0-NEXT: movzbl %al, %eax
+; 686-O0-NEXT: movzbl var_7, %ecx
+; 686-O0-NEXT: cmpl %ecx, %eax
+; 686-O0-NEXT: sete %al
+; 686-O0-NEXT: andb $1, %al
+; 686-O0-NEXT: movzbl %al, %eax
+; 686-O0-NEXT: # kill: def $ax killed $ax killed $eax
+; 686-O0-NEXT: # implicit-def: $ecx
+; 686-O0-NEXT: movw %ax, (%ecx)
 ; 686-O0-NEXT: addl $2, %esp
-; 686-O0-NEXT: .cfi_def_cfa_offset 12
-; 686-O0-NEXT: popl %esi
-; 686-O0-NEXT: .cfi_def_cfa_offset 8
-; 686-O0-NEXT: popl %edi
 ; 686-O0-NEXT: .cfi_def_cfa_offset 4
 ; 686-O0-NEXT: retl
 ;
@@ -446,35 +431,35 @@
 ; X86-O0-NEXT: movl var_13, %eax
 ; X86-O0-NEXT: xorl $-1, %eax
 ; X86-O0-NEXT: movl %eax, %eax
-; X86-O0-NEXT: movl %eax, %ecx
+; X86-O0-NEXT: # kill: def $rax killed $eax
 ; X86-O0-NEXT: cmpl $0, var_13
-; X86-O0-NEXT: setne %dl
-; X86-O0-NEXT: xorb $-1, %dl
-; X86-O0-NEXT: andb $1, %dl
-; X86-O0-NEXT: movzbl %dl, %eax
-; X86-O0-NEXT: movl %eax, %esi
-; X86-O0-NEXT: movl var_13, %eax
-; X86-O0-NEXT: xorl $-1, %eax
-; X86-O0-NEXT: xorl var_16, %eax
-; X86-O0-NEXT: movl %eax, %eax
-; X86-O0-NEXT: movl %eax, %edi
-; X86-O0-NEXT: andq %rdi, %rsi
-; X86-O0-NEXT: orq %rsi, %rcx
-; X86-O0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; X86-O0-NEXT: setne %cl
+; X86-O0-NEXT: xorb $-1, %cl
+; X86-O0-NEXT: andb $1, %cl
+; X86-O0-NEXT: movzbl %cl, %ecx
+; X86-O0-NEXT: # kill: def $rcx killed $ecx
+; X86-O0-NEXT: movl var_13, %edx
+; X86-O0-NEXT: xorl $-1, %edx
+; X86-O0-NEXT: xorl var_16, %edx
+; X86-O0-NEXT: movl %edx, %edx
+; X86-O0-NEXT: # kill: def $rdx killed $edx
+; X86-O0-NEXT: andq %rdx, %rcx
+; X86-O0-NEXT: orq %rcx, %rax
+; X86-O0-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
 ; X86-O0-NEXT: movl var_13, %eax
 ; X86-O0-NEXT: xorl $-1, %eax
 ; X86-O0-NEXT: movl %eax, %eax
-; X86-O0-NEXT: movl %eax, %ecx
+; X86-O0-NEXT: # kill: def $rax killed $eax
 ; X86-O0-NEXT: cmpl $0, var_13
-; X86-O0-NEXT: setne %dl
-; X86-O0-NEXT: xorb $-1, %dl
-; X86-O0-NEXT: andb $1, %dl
-; X86-O0-NEXT: movzbl %dl, %eax
-; X86-O0-NEXT: movl %eax, %esi
-; X86-O0-NEXT: andq $0, %rsi
-; X86-O0-NEXT: orq %rsi, %rcx
-; X86-O0-NEXT: # kill: def $ecx killed $ecx killed $rcx
-; X86-O0-NEXT: movl %ecx, var_46
+; X86-O0-NEXT: setne %cl
+; X86-O0-NEXT: xorb $-1, %cl
+; X86-O0-NEXT: andb $1, %cl
+; X86-O0-NEXT: movzbl %cl, %ecx
+; X86-O0-NEXT: # kill: def $rcx killed $ecx
+; X86-O0-NEXT: andq $0, %rcx
+; X86-O0-NEXT: orq %rcx, %rax
+; X86-O0-NEXT: # kill: def $eax killed $eax killed $rax
+; X86-O0-NEXT: movl %eax, var_46
 ; X86-O0-NEXT: retq
 ;
 ; X64-LABEL: f3:
@@ -499,31 +484,28 @@
 ; 686-O0-NEXT: .cfi_offset %ebp, -8
 ; 686-O0-NEXT: movl %esp, %ebp
 ; 686-O0-NEXT: .cfi_def_cfa_register %ebp
-; 686-O0-NEXT: pushl %edi
 ; 686-O0-NEXT: pushl %esi
 ; 686-O0-NEXT: andl $-8, %esp
-; 686-O0-NEXT: subl $8, %esp
-; 686-O0-NEXT: .cfi_offset %esi, -16
-; 686-O0-NEXT: .cfi_offset %edi, -12
+; 686-O0-NEXT: subl $16, %esp
+; 686-O0-NEXT: .cfi_offset %esi, -12
 ; 686-O0-NEXT: movl var_13, %eax
 ; 686-O0-NEXT: movl %eax, %ecx
 ; 686-O0-NEXT: notl %ecx
 ; 686-O0-NEXT: testl %eax, %eax
-; 686-O0-NEXT: sete %dl
-; 686-O0-NEXT: movzbl %dl, %eax
-; 686-O0-NEXT: movl var_16, %esi
-; 686-O0-NEXT: movl %ecx, %edi
-; 686-O0-NEXT: xorl %esi, %edi
-; 686-O0-NEXT: andl %edi, %eax
+; 686-O0-NEXT: sete %al
+; 686-O0-NEXT: movzbl %al, %eax
+; 686-O0-NEXT: movl var_16, %edx
+; 686-O0-NEXT: movl %ecx, %esi
+; 686-O0-NEXT: xorl %edx, %esi
+; 686-O0-NEXT: andl %esi, %eax
 ; 686-O0-NEXT: orl %eax, %ecx
 ; 686-O0-NEXT: movl %ecx, (%esp)
 ; 686-O0-NEXT: movl $0, {{[0-9]+}}(%esp)
 ; 686-O0-NEXT: movl var_13, %eax
 ; 686-O0-NEXT: notl %eax
 ; 686-O0-NEXT: movl %eax, var_46
-; 686-O0-NEXT: leal -8(%ebp), %esp
+; 686-O0-NEXT: leal -4(%ebp), %esp
 ; 686-O0-NEXT: popl %esi
-; 686-O0-NEXT: popl %edi
 ; 686-O0-NEXT: popl %ebp
 ; 686-O0-NEXT: .cfi_def_cfa %esp, 4
 ; 686-O0-NEXT: retl
Index: test/CodeGen/X86/pr32340.ll
===================================================================
--- test/CodeGen/X86/pr32340.ll
+++ test/CodeGen/X86/pr32340.ll
@@ -14,37 +14,37 @@
 ; X64-LABEL: foo:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: # kill: def $rax killed $eax
 ; X64-NEXT: movw $0, var_825
-; X64-NEXT: movzwl var_32, %eax
+; X64-NEXT: movzwl var_32, %ecx
 ; X64-NEXT: movzwl var_901, %edx
-; X64-NEXT: movl %eax, %esi
+; X64-NEXT: movl %ecx, %esi
 ; X64-NEXT: xorl %edx, %esi
-; X64-NEXT: movl %eax, %edx
+; X64-NEXT: movl %ecx, %edx
 ; X64-NEXT: xorl %esi, %edx
-; X64-NEXT: addl %eax, %edx
-; X64-NEXT: movslq %edx, %rdi
-; X64-NEXT: movq %rdi, var_826
-; X64-NEXT: movzwl var_32, %eax
-; X64-NEXT: movl %eax, %edi
-; X64-NEXT: movzwl var_901, %eax
-; X64-NEXT: xorl $51981, %eax # imm = 0xCB0D
-; X64-NEXT: movslq %eax, %r8
-; X64-NEXT: movabsq $-1142377792914660288, %r9 # imm = 0xF02575732E06E440
-; X64-NEXT: xorq %r9, %r8
-; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: xorq %r8, %r9
-; X64-NEXT: xorq $-1, %r9
-; X64-NEXT: xorq %r9, %rdi
-; X64-NEXT: movq %rdi, %r8
-; X64-NEXT: orq var_57, %r8
-; X64-NEXT: orq %r8, %rdi
-; X64-NEXT: # kill: def $di killed $di killed $rdi
-; X64-NEXT: movw %di, var_900
-; X64-NEXT: cmpq var_28, %rcx
-; X64-NEXT: setne %r10b
-; X64-NEXT: andb $1, %r10b
-; X64-NEXT: movzbl %r10b, %eax
+; X64-NEXT: addl %ecx, %edx
+; X64-NEXT: movslq %edx, %rcx
+; X64-NEXT: movq %rcx, var_826
+; X64-NEXT: movzwl var_32, %ecx
+; X64-NEXT: # kill: def $rcx killed $ecx
+; X64-NEXT: movzwl var_901, %edx
+; X64-NEXT: xorl $51981, %edx # imm = 0xCB0D
+; X64-NEXT: movslq %edx, %rdx
+; X64-NEXT: movabsq $-1142377792914660288, %rsi # imm = 0xF02575732E06E440
+; X64-NEXT: xorq %rsi, %rdx
+; X64-NEXT: movq %rcx, %rsi
+; X64-NEXT: xorq %rdx, %rsi
+; X64-NEXT: xorq $-1, %rsi
+; X64-NEXT: xorq %rsi, %rcx
+; X64-NEXT: movq %rcx, %rdx
+; X64-NEXT: orq var_57, %rdx
+; X64-NEXT: orq %rdx, %rcx
+; X64-NEXT: # kill: def $cx killed $cx killed $rcx
+; X64-NEXT: movw %cx, var_900
+; X64-NEXT: cmpq var_28, %rax
+; X64-NEXT: setne %al
+; X64-NEXT: andb $1, %al
+; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: movw %ax, var_827
 ; X64-NEXT: retq
Index: test/CodeGen/X86/pr32345.ll
===================================================================
--- test/CodeGen/X86/pr32345.ll
+++ test/CodeGen/X86/pr32345.ll
@@ -15,23 +15,23 @@
 ; X640-NEXT: xorl %ecx, %eax
 ; X640-NEXT: movzwl var_27, %ecx
 ; X640-NEXT: xorl %ecx, %eax
-; X640-NEXT: movslq %eax, %rdx
-; X640-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
+; X640-NEXT: cltq
+; X640-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
 ; X640-NEXT: movzwl var_22, %eax
 ; X640-NEXT: movzwl var_27, %ecx
 ; X640-NEXT: xorl %ecx, %eax
 ; X640-NEXT: movzwl var_27, %ecx
 ; X640-NEXT: xorl %ecx, %eax
-; X640-NEXT: movslq %eax, %rdx
-; X640-NEXT: movzwl var_27, %eax
-; X640-NEXT: subl $16610, %eax # imm = 0x40E2
-; X640-NEXT: movl %eax, %eax
-; X640-NEXT: movl %eax, %ecx
+; X640-NEXT: cltq
+; X640-NEXT: movzwl var_27, %ecx
+; X640-NEXT: subl $16610, %ecx # imm = 0x40E2
+; X640-NEXT: movl %ecx, %ecx
+; X640-NEXT: # kill: def $rcx killed $ecx
 ; X640-NEXT: # kill: def $cl killed $rcx
-; X640-NEXT: sarq %cl, %rdx
-; X640-NEXT: # kill: def $dl killed $dl killed $rdx
-; X640-NEXT: # implicit-def: $rsi
-; X640-NEXT: movb %dl, (%rsi)
+; X640-NEXT: sarq %cl, %rax
+; X640-NEXT: # kill: def $al killed $al killed $rax
+; X640-NEXT: # implicit-def: $rcx
+; X640-NEXT: movb %al, (%rcx)
 ; X640-NEXT: retq
 ;
 ; 6860-LABEL: foo:
@@ -41,43 +41,37 @@
 ; 6860-NEXT: .cfi_offset %ebp, -8
 ; 6860-NEXT: movl %esp, %ebp
 ; 6860-NEXT: .cfi_def_cfa_register %ebp
-; 6860-NEXT: pushl %ebx
-; 6860-NEXT: pushl %edi
-; 6860-NEXT: pushl %esi
 ; 6860-NEXT: andl $-8, %esp
-; 6860-NEXT: subl $32, %esp
-; 6860-NEXT: .cfi_offset %esi, -20
-; 6860-NEXT: .cfi_offset %edi, -16
-; 6860-NEXT: .cfi_offset %ebx, -12
+; 6860-NEXT: subl $24, %esp
 ; 6860-NEXT: movw var_22, %ax
 ; 6860-NEXT: movzwl var_27, %ecx
 ; 6860-NEXT: movw %cx, %dx
 ; 6860-NEXT: xorw %dx, %ax
-; 6860-NEXT: # implicit-def: $esi
-; 6860-NEXT: movw %ax, %si
-; 6860-NEXT: xorl %ecx, %esi
-; 6860-NEXT: # kill: def $si killed $si killed $esi
-; 6860-NEXT: movzwl %si, %ecx
-; 6860-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; 6860-NEXT: # implicit-def: $edx
+; 6860-NEXT: movw %ax, %dx
+; 6860-NEXT: xorl %ecx, %edx
+; 6860-NEXT: # kill: def $dx killed $dx killed $edx
+; 6860-NEXT: movzwl %dx, %eax
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp)
 ; 6860-NEXT: movl $0, {{[0-9]+}}(%esp)
 ; 6860-NEXT: movw var_22, %ax
 ; 6860-NEXT: movzwl var_27, %ecx
 ; 6860-NEXT: movw %cx, %dx
 ; 6860-NEXT: xorw %dx, %ax
-; 6860-NEXT: # implicit-def: $edi
-; 6860-NEXT: movw %ax, %di
-; 6860-NEXT: xorl %ecx, %edi
-; 6860-NEXT: # kill: def $di killed $di killed $edi
-; 6860-NEXT: movzwl %di, %ebx
+; 6860-NEXT: # implicit-def: $edx
+; 6860-NEXT: movw %ax, %dx
+; 6860-NEXT: xorl %ecx, %edx
+; 6860-NEXT: # kill: def $dx killed $dx killed $edx
+; 6860-NEXT: movzwl %dx, %eax
 ; 6860-NEXT: # kill: def $cl killed $cl killed $ecx
 ; 6860-NEXT: addb $30, %cl
-; 6860-NEXT: xorl %eax, %eax
+; 6860-NEXT: xorl %edx, %edx
 ; 6860-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; 6860-NEXT: shrdl %cl, %eax, %ebx
+; 6860-NEXT: shrdl %cl, %edx, %eax
 ; 6860-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
 ; 6860-NEXT: testb $32, %cl
-; 6860-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; 6860-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; 6860-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; 6860-NEXT: jne .LBB0_2
 ; 6860-NEXT: # %bb.1: # %bb
 ; 6860-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -87,10 +81,7 @@
 ; 6860-NEXT: # kill: def $al killed $al killed $eax
 ; 6860-NEXT: # implicit-def: $ecx
 ; 6860-NEXT: movb %al, (%ecx)
-; 6860-NEXT: leal -12(%ebp), %esp
-; 6860-NEXT: popl %esi
-; 6860-NEXT: popl %edi
-; 6860-NEXT: popl %ebx
+; 6860-NEXT: movl %ebp, %esp
 ; 6860-NEXT: popl %ebp
 ; 6860-NEXT: .cfi_def_cfa %esp, 4
 ; 6860-NEXT: retl
Index: test/CodeGen/X86/pr32451.ll
===================================================================
--- test/CodeGen/X86/pr32451.ll
+++ test/CodeGen/X86/pr32451.ll
@@ -9,29 +9,24 @@
 define i8** @japi1_convert_690(i8**, i8***, i32) {
 ; CHECK-LABEL: japi1_convert_690:
 ; CHECK: # %bb.0: # %top
-; CHECK-NEXT: pushl %ebx
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: subl $16, %esp
-; CHECK-NEXT: .cfi_def_cfa_offset 24
-; CHECK-NEXT: .cfi_offset %ebx, -8
+; CHECK-NEXT: .cfi_def_cfa_offset 20
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; CHECK-NEXT: calll julia.gc_root_decl
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; CHECK-NEXT: calll jl_get_ptls_states
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; CHECK-NEXT: movl 4(%ecx), %edx
-; CHECK-NEXT: movb (%edx), %bl
-; CHECK-NEXT: andb $1, %bl
-; CHECK-NEXT: movzbl %bl, %edx
+; CHECK-NEXT: movb (%edx), %dl
+; CHECK-NEXT: andb $1, %dl
+; CHECK-NEXT: movzbl %dl, %edx
 ; CHECK-NEXT: movl %edx, (%esp)
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; CHECK-NEXT: calll jl_box_int32
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; CHECK-NEXT: movl %eax, (%ecx)
 ; CHECK-NEXT: addl $16, %esp
-; CHECK-NEXT: .cfi_def_cfa_offset 8
-; CHECK-NEXT: popl %ebx
 ; CHECK-NEXT: .cfi_def_cfa_offset 4
 ; CHECK-NEXT: retl
 top:
Index: test/CodeGen/X86/pr34592.ll
===================================================================
--- test/CodeGen/X86/pr34592.ll
+++ test/CodeGen/X86/pr34592.ll
@@ -24,24 +24,24 @@
 ; CHECK-NEXT: vmovaps %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; CHECK-NEXT: # implicit-def: $ymm0
 ; CHECK-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm0
-; CHECK-NEXT: vpalignr {{.*#+}} ymm11 = ymm2[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
-; CHECK-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,0]
-; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
+; CHECK-NEXT: vpalignr {{.*#+}} ymm6 = ymm2[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
+; CHECK-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,2,0]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5],ymm6[6,7]
 ; CHECK-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
-; CHECK-NEXT: # implicit-def: $ymm11
-; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm11, %ymm11
-; CHECK-NEXT: vextracti128 $1, %ymm7, %xmm2
-; CHECK-NEXT: vmovq {{.*#+}} xmm2 = xmm2[0],zero
 ; CHECK-NEXT: # implicit-def: $ymm6
-; CHECK-NEXT: vmovaps %xmm2, %xmm6
-; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm11[4,5,6,7]
+; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm6, %ymm2
+; CHECK-NEXT: vextracti128 $1, %ymm7, %xmm6
+; CHECK-NEXT: vmovq {{.*#+}} xmm6 = xmm6[0],zero
+; CHECK-NEXT: # implicit-def: $ymm11
+; CHECK-NEXT: vmovaps %xmm6, %xmm11
+; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7]
 ; CHECK-NEXT: vmovaps %xmm7, %xmm6
 ; CHECK-NEXT: vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT: # implicit-def: $ymm11
 ; CHECK-NEXT: vmovaps %xmm6, %xmm11
-; CHECK-NEXT: vpalignr {{.*#+}} ymm9 = ymm9[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; CHECK-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,3]
-; CHECK-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; CHECK-NEXT: vpalignr {{.*#+}} ymm6 = ymm9[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; CHECK-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,3]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm6[4,5,6,7]
 ; CHECK-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
 ; CHECK-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,1,3]
 ; CHECK-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,1,0,1,4,5,4,5]
@@ -49,7 +49,7 @@
 ; CHECK-NEXT: vmovaps %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; CHECK-NEXT: vmovaps %ymm5, %ymm1
 ; CHECK-NEXT: vmovaps %ymm3, (%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps %ymm9, %ymm3
+; CHECK-NEXT: vmovaps %ymm6, %ymm3
 ; CHECK-NEXT: movq %rbp, %rsp
 ; CHECK-NEXT: popq %rbp
 ; CHECK-NEXT: .cfi_def_cfa %rsp, 8
Index: test/CodeGen/X86/pr39733.ll
===================================================================
--- test/CodeGen/X86/pr39733.ll
+++ test/CodeGen/X86/pr39733.ll
@@ -23,8 +23,8 @@
 ; CHECK-NEXT: vmovaps %xmm1, %xmm2
 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; CHECK-NEXT: vpmovsxwd %xmm0, %xmm0
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2
-; CHECK-NEXT: vmovdqa %ymm2, (%rsp)
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; CHECK-NEXT: vmovdqa %ymm0, (%rsp)
 ; CHECK-NEXT: movq %rbp, %rsp
 ; CHECK-NEXT: popq %rbp
 ; CHECK-NEXT: .cfi_def_cfa %rsp, 8
Index: test/CodeGen/X86/regalloc-fast-missing-live-out-spill.mir
===================================================================
--- test/CodeGen/X86/regalloc-fast-missing-live-out-spill.mir
+++ test/CodeGen/X86/regalloc-fast-missing-live-out-spill.mir
@@ -23,15 +23,15 @@
 ; CHECK: successors: %bb.3(0x80000000)
 ; CHECK: $rax = MOV64rm %stack.1, 1, $noreg, 0, $noreg :: (load 8 from %stack.1)
 ; CHECK: renamable $ecx = MOV32r0 implicit-def $eflags
- ; CHECK: renamable $rdx = SUBREG_TO_REG 0, killed renamable $ecx, %subreg.sub_32bit
+ ; CHECK: renamable $rcx = SUBREG_TO_REG 0, killed renamable $ecx, %subreg.sub_32bit
 ; CHECK: MOV64mi32 killed renamable $rax, 1, $noreg, 0, $noreg, 0 :: (volatile store 8)
- ; CHECK: MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed $rdx :: (store 8 into %stack.0)
+ ; CHECK: MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed $rcx :: (store 8 into %stack.0)
 ; CHECK: bb.3:
 ; CHECK: successors: %bb.2(0x40000000), %bb.1(0x40000000)
 ; CHECK: $rax = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK: renamable $ecx = MOV32r0 implicit-def dead $eflags
- ; CHECK: renamable $rdx = SUBREG_TO_REG 0, killed renamable $ecx, %subreg.sub_32bit
- ; CHECK: MOV64mr %stack.1, 1, $noreg, 0, $noreg, killed $rdx :: (store 8 into %stack.1)
+ ; CHECK: renamable $rcx = SUBREG_TO_REG 0, killed renamable $ecx, %subreg.sub_32bit
+ ; CHECK: MOV64mr %stack.1, 1, $noreg, 0, $noreg, killed $rcx :: (store 8 into %stack.1)
 ; CHECK: JMP64r killed renamable $rax
 bb.0:
 liveins: $edi, $rsi
Index: test/CodeGen/X86/swift-return.ll
===================================================================
--- test/CodeGen/X86/swift-return.ll
+++ test/CodeGen/X86/swift-return.ll
@@ -28,11 +28,10 @@
 ; CHECK-O0-NEXT: movl %edi, {{[0-9]+}}(%rsp)
 ; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edi
 ; CHECK-O0-NEXT: callq gen
-; CHECK-O0-NEXT: movswl %ax, %ecx
-; CHECK-O0-NEXT: movsbl %dl, %esi
-; CHECK-O0-NEXT: addl %esi, %ecx
-; CHECK-O0-NEXT: # kill: def $cx killed $cx killed $ecx
-; CHECK-O0-NEXT: movw %cx, %ax
+; CHECK-O0-NEXT: cwtl
+; CHECK-O0-NEXT: movsbl %dl, %ecx
+; CHECK-O0-NEXT: addl %ecx, %eax
+; CHECK-O0-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-O0-NEXT: popq %rcx
 ; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-O0-NEXT: retq
@@ -80,16 +79,16 @@
 ; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edi
 ; CHECK-O0-NEXT: movq %rsp, %rax
 ; CHECK-O0-NEXT: callq gen2
+; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %eax
 ; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edx
-; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %esi
-; CHECK-O0-NEXT: movl (%rsp), %edi
-; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %r8d
-; CHECK-O0-NEXT: addl %r8d, %edi
-; CHECK-O0-NEXT: addl %esi, %edi
-; CHECK-O0-NEXT: addl %edx, %edi
-; CHECK-O0-NEXT: addl %ecx, %edi
-; CHECK-O0-NEXT: movl %edi, %eax
+; CHECK-O0-NEXT: movl (%rsp), %esi
+; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edi
+; CHECK-O0-NEXT: addl %edi, %esi
+; CHECK-O0-NEXT: addl %edx, %esi
+; CHECK-O0-NEXT: addl %ecx, %esi
+; CHECK-O0-NEXT: addl %eax, %esi
+; CHECK-O0-NEXT: movl %esi, %eax
 ; CHECK-O0-NEXT: addq $24, %rsp
 ; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-O0-NEXT: retq
@@ -264,17 +263,17 @@
 ; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-O0-NEXT: callq produce_i1_ret
 ; CHECK-O0-NEXT: andb $1, %al
-; CHECK-O0-NEXT: movzbl %al, %esi
-; CHECK-O0-NEXT: movl %esi, var
+; CHECK-O0-NEXT: movzbl %al, %eax
+; CHECK-O0-NEXT: movl %eax, var
 ; CHECK-O0-NEXT: andb $1, %dl
-; CHECK-O0-NEXT: movzbl %dl, %esi
-; CHECK-O0-NEXT: movl %esi, var
+; CHECK-O0-NEXT: movzbl %dl, %eax
+; CHECK-O0-NEXT: movl %eax, var
 ; CHECK-O0-NEXT: andb $1, %cl
-; CHECK-O0-NEXT: movzbl %cl, %esi
-; CHECK-O0-NEXT: movl %esi, var
+; CHECK-O0-NEXT: movzbl %cl, %eax
+; CHECK-O0-NEXT: movl %eax, var
 ; CHECK-O0-NEXT: andb $1, %r8b
-; CHECK-O0-NEXT: movzbl %r8b, %esi
-; CHECK-O0-NEXT: movl %esi, var
+; CHECK-O0-NEXT: movzbl %r8b, %eax
+; CHECK-O0-NEXT: movl %eax, var
 ; CHECK-O0-NEXT: popq %rax
 ; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-O0-NEXT: retq
Index: test/CodeGen/X86/swifterror.ll
===================================================================
--- test/CodeGen/X86/swifterror.ll
+++ test/CodeGen/X86/swifterror.ll
@@ -790,7 +790,6 @@
 ; CHECK-O0-LABEL: testAssign4
 ; CHECK-O0: callq _foo2
 ; CHECK-O0: xorl %eax, %eax
-; CHECK-O0: movl %eax, %ecx
 ; CHECK-O0: movq %rcx, [[SLOT:[-a-z0-9\(\)\%]*]]
 ; CHECK-O0: movq [[SLOT]], %rax
 ; CHECK-O0: movq %rax, [[SLOT2:[-a-z0-9\(\)\%]*]]
Index: test/DebugInfo/X86/op_deref.ll
===================================================================
--- test/DebugInfo/X86/op_deref.ll
+++ test/DebugInfo/X86/op_deref.ll
@@ -6,10 +6,10 @@
 ; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=DWARF3

 ; DWARF4: DW_AT_location [DW_FORM_sec_offset] (0x00000000
-; DWARF4-NEXT: {{.*}}: DW_OP_breg1 RDX+0, DW_OP_deref
+; DWARF4-NEXT: {{.*}}: DW_OP_breg2 RCX+0, DW_OP_deref

 ; DWARF3: DW_AT_location [DW_FORM_data4] (0x00000000
-; DWARF3-NEXT: {{.*}}: DW_OP_breg1 RDX+0, DW_OP_deref
+; DWARF3-NEXT: {{.*}}: DW_OP_breg2 RCX+0, DW_OP_deref

 ; CHECK-NOT: DW_TAG
 ; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000067] = "vla")
@@ -17,8 +17,8 @@
 ; Check the DEBUG_VALUE comments for good measure.
 ; RUN: llc -O0 -mtriple=x86_64-apple-darwin %s -o - -filetype=asm | FileCheck %s -check-prefix=ASM-CHECK
 ; vla should have a register-indirect address at one point.
-; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [$rdx+0]
-; ASM-CHECK: DW_OP_breg1
+; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [$rcx+0]
+; ASM-CHECK: DW_OP_breg2

 ; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s --check-prefix=PRETTY-PRINT
 ; PRETTY-PRINT: DIExpression(DW_OP_deref)