Index: llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp
+++ llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp
@@ -9,6 +9,35 @@
 //
 // This is an extremely simple MachineInstr-level copy propagation pass.
 //
+// This pass forwards the source of COPYs to the users of their destinations
+// when doing so is legal. For example:
+//
+//   %reg1 = COPY %reg0
+//   ...
+//   ... = OP %reg1
+//
+// If
+//   - %reg0 has not been clobbered by the time of the use of %reg1
+//   - the register class constraints are satisfied
+//   - the COPY def is the only value that reaches OP
+// then this pass replaces the above with:
+//
+//   %reg1 = COPY %reg0
+//   ...
+//   ... = OP %reg0
+//
+// This pass also removes some redundant COPYs. For example:
+//
+//   %R1 = COPY %R0
+//   ... // No clobber of %R1
+//   %R0 = COPY %R1 <<< Removed
+//
+// or
+//
+//   %R1 = COPY %R0
+//   ... // No clobber of %R0
+//   %R1 = COPY %R0 <<< Removed
+//
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/DenseMap.h"
@@ -23,11 +52,13 @@
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugCounter.h"
 #include "llvm/Support/raw_ostream.h"
 #include
 #include
@@ -37,6 +68,9 @@
 #define DEBUG_TYPE "machine-cp"
 
 STATISTIC(NumDeletes, "Number of dead copies deleted");
+STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
+DEBUG_COUNTER(FwdCounter, "machine-cp-fwd",
+              "Controls which register COPYs are forwarded");
 
 namespace {
 
@@ -73,6 +107,10 @@
     void ReadRegister(unsigned Reg);
     void CopyPropagateBlock(MachineBasicBlock &MBB);
     bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
+    void forwardUses(MachineInstr &MI);
+    bool isForwardableRegClassCopy(const MachineInstr &Copy,
+                                   const MachineInstr &UseI, unsigned UseIdx);
+    bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
 
     /// Candidates for deletion.
     SmallSetVector MaybeDeadCopies;
@@ -208,6 +246,152 @@
   return true;
 }
 
+/// Decide whether we should forward the source of \param Copy to its use in
+/// \param UseI based on the physical register class constraints of the opcode
+/// and avoiding introducing more cross-class COPYs.
+bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
+                                                       const MachineInstr &UseI,
+                                                       unsigned UseIdx) {
+
+  unsigned CopySrcReg = Copy.getOperand(1).getReg();
+
+  // If the new register meets the opcode register constraints, then allow
+  // forwarding.
+  if (const TargetRegisterClass *URC =
+          UseI.getRegClassConstraint(UseIdx, TII, TRI))
+    return URC->contains(CopySrcReg);
+
+  if (!UseI.isCopy())
+    return false;
+
+  /// COPYs don't have register class constraints, so if the user instruction
+  /// is a COPY, we just try to avoid introducing additional cross-class
+  /// COPYs. For example:
+  ///
+  ///   RegClassA = COPY RegClassB  // Copy parameter
+  ///   ...
+  ///   RegClassB = COPY RegClassA  // UseI parameter
+  ///
+  /// which after forwarding becomes
+  ///
+  ///   RegClassA = COPY RegClassB
+  ///   ...
+  ///   RegClassB = COPY RegClassB
+  ///
+  /// so we have reduced the number of cross-class COPYs and potentially
+  /// introduced a nop COPY that can be removed.
+  const TargetRegisterClass *UseDstRC =
+      TRI->getMinimalPhysRegClass(UseI.getOperand(0).getReg());
+
+  const TargetRegisterClass *SuperRC = UseDstRC;
+  for (TargetRegisterClass::sc_iterator SuperRCI = UseDstRC->getSuperClasses();
+       SuperRC; SuperRC = *SuperRCI++)
+    if (SuperRC->contains(CopySrcReg))
+      return true;
+
+  return false;
+}
+
+/// Check that \p MI does not have implicit uses that overlap with its \p Use
+/// operand (the register being replaced), since these can sometimes be
+/// implicitly tied to other operands. For example, on AMDGPU:
+///
+/// V_MOVRELS_B32_e32 %VGPR2, %M0, %EXEC, %VGPR2_VGPR3_VGPR4_VGPR5
+///
+/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
+/// way of knowing we need to update the latter when updating the former.
+bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
+                                                const MachineOperand &Use) {
+  for (const MachineOperand &MIUse : MI.uses())
+    if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
+        MIUse.isUse() && TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
+      return true;
+
+  return false;
+}
+
+/// Look for available copies whose destination register is used by \p MI and
+/// replace the use in \p MI with the copy's source register.
+void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
+  if (AvailCopyMap.empty())
+    return;
+
+  // Look for non-tied explicit vreg uses that have an active COPY
+  // instruction that defines the physical register allocated to them.
+  // Replace the vreg with the source of the active COPY.
+  for (unsigned OpIdx = 0, OpEnd = MI.getNumOperands(); OpIdx < OpEnd;
+       ++OpIdx) {
+    MachineOperand &MOUse = MI.getOperand(OpIdx);
+    // Don't forward into undef use operands, since doing so can cause problems
+    // with the machine verifier: it doesn't treat undef reads as reads, so we
+    // can end up with a live range that ends on an undef read, leading to an
+    // error that the live range doesn't end on a read of the live range
+    // register.
+    if (!MOUse.isReg() || MOUse.isTied() || MOUse.isUndef() || MOUse.isDef() ||
+        MOUse.isImplicit())
+      continue;
+
+    if (!MOUse.getReg())
+      continue;
+
+    // Check that the register is marked 'renamable' so we know it is safe to
+    // rename it without violating any constraints that aren't expressed in the
+    // IR (e.g. ABI or opcode requirements).
+    if (!MOUse.isRenamable())
+      continue;
+
+    auto CI = AvailCopyMap.find(MOUse.getReg());
+    if (CI == AvailCopyMap.end())
+      continue;
+
+    MachineInstr &Copy = *CI->second;
+    unsigned CopyDstReg = Copy.getOperand(0).getReg();
+    const MachineOperand &CopySrc = Copy.getOperand(1);
+    unsigned CopySrcReg = CopySrc.getReg();
+
+    // FIXME: Don't handle partial uses of wider COPYs yet.
+    if (MOUse.getReg() != CopyDstReg) {
+      DEBUG(dbgs() << "MCP: FIXME! Not forwarding COPY to sub-register use:\n "
+                   << MI);
+      continue;
+    }
+
+    // Don't forward COPYs of reserved regs unless they are constant.
+    if (MRI->isReserved(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
+      continue;
+
+    if (!isForwardableRegClassCopy(Copy, MI, OpIdx))
+      continue;
+
+    if (hasImplicitOverlap(MI, MOUse))
+      continue;
+
+    if (!DebugCounter::shouldExecute(FwdCounter)) {
+      DEBUG(dbgs() << "MCP: Skipping forwarding due to debug counter:\n "
+                   << MI);
+      continue;
+    }
+
+    DEBUG(dbgs() << "MCP: Replacing " << printReg(MOUse.getReg(), TRI)
+                 << "\n with " << printReg(CopySrcReg, TRI) << "\n in "
+                 << MI << " from " << Copy);
+
+    MOUse.setReg(CopySrcReg);
+    if (!CopySrc.isRenamable())
+      MOUse.setIsRenamable(false);
+
+    DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
+
+    // Clear kill markers that may have been invalidated.
+    for (MachineInstr &KMI :
+         make_range(Copy.getIterator(), std::next(MI.getIterator())))
+      KMI.clearRegisterKills(CopySrcReg, TRI);
+
+    ++NumCopyForwards;
+    Changed = true;
+  }
+}
+
 void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
   DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
 
@@ -241,6 +425,11 @@
       if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
         continue;
 
+      forwardUses(*MI);
+
+      // Src may have been changed by forwardUses()
+      Src = MI->getOperand(1).getReg();
+
       // If Src is defined by a previous copy, the previous copy cannot be
       // eliminated.
       ReadRegister(Src);
@@ -292,6 +481,20 @@
       continue;
     }
 
+    // Clobber any earlyclobber regs first.
+    for (const MachineOperand &MO : MI->operands())
+      if (MO.isReg() && MO.isEarlyClobber()) {
+        unsigned Reg = MO.getReg();
+        // If we have a tied earlyclobber, that means it is also read by this
+        // instruction, so we need to make sure we don't remove it as dead
+        // later.
+        if (MO.isTied())
+          ReadRegister(Reg);
+        ClobberRegister(Reg);
+      }
+
+    forwardUses(*MI);
+
     // Not a copy.
     SmallVector Defs;
     const MachineOperand *RegMask = nullptr;
@@ -307,7 +510,7 @@
       assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
              "MachineCopyPropagation should be run after register allocation!");
 
-      if (MO.isDef()) {
+      if (MO.isDef() && !MO.isEarlyClobber()) {
         Defs.push_back(Reg);
         continue;
       } else if (MO.readsReg())
@@ -364,6 +567,8 @@
     // since we don't want to trust live-in lists.
     if (MBB.succ_empty()) {
       for (MachineInstr *MaybeDead : MaybeDeadCopies) {
+        DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
+              MaybeDead->dump());
         assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
         MaybeDead->eraseFromParent();
         Changed = true;
Index: llvm/trunk/lib/CodeGen/TargetPassConfig.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/TargetPassConfig.cpp
+++ llvm/trunk/lib/CodeGen/TargetPassConfig.cpp
@@ -1081,6 +1081,10 @@
   // kill markers.
   addPass(&StackSlotColoringID);
 
+  // Copy propagate to forward register uses and try to eliminate COPYs that
+  // were not coalesced.
+  addPass(&MachineCopyPropagationID);
+
   // Run post-ra machine LICM to hoist reloads / remats.
   //
   // FIXME: can this move into MachineLateOptimization?
Index: llvm/trunk/test/CodeGen/AArch64/aarch64-fold-lslfast.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/aarch64-fold-lslfast.ll +++ llvm/trunk/test/CodeGen/AArch64/aarch64-fold-lslfast.ll @@ -9,7 +9,8 @@ ; CHECK-LABEL: halfword: ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8 ; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1] -; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1] +; CHECK: mov [[REG3:x[0-9]+]], [[REG2]] +; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1] %shr81 = lshr i32 %xor72, 9 %conv82 = zext i32 %shr81 to i64 %idxprom83 = and i64 %conv82, 255 @@ -24,7 +25,8 @@ ; CHECK-LABEL: word: ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8 ; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2] -; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2] +; CHECK: mov [[REG3:x[0-9]+]], [[REG2]] +; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2] %shr81 = lshr i32 %xor72, 9 %conv82 = zext i32 %shr81 to i64 %idxprom83 = and i64 %conv82, 255 @@ -39,7 +41,8 @@ ; CHECK-LABEL: doubleword: ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8 ; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3] -; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3] +; CHECK: mov [[REG3:x[0-9]+]], [[REG2]] +; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3] %shr81 = lshr i32 %xor72, 9 %conv82 = zext i32 %shr81 to i64 %idxprom83 = and i64 %conv82, 255 Index: llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll @@ -8,15 +8,9 @@ ; CHECK: add.2d v[[REG:[0-9]+]], v0, v1 ; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1 ; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1 -; Without advanced copy optimization, we end up with cross register -; banks copies that cannot be coalesced. -; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]] -; With advanced copy optimization, we end up with just one copy -; to insert the computed high part into the V register. 
-; CHECK-OPT-NOT: fmov +; CHECK-NOT: fmov ; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]] -; CHECK-NOOPT: fmov d0, [[COPY_REG3]] -; CHECK-OPT-NOT: fmov +; CHECK-NOT: fmov ; CHECK: mov.d v0[1], [[COPY_REG2]] ; CHECK-NEXT: ret ; @@ -24,11 +18,9 @@ ; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d ; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1 ; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1 -; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]] -; GENERIC-OPT-NOT: fmov +; GENERIC-NOT: fmov ; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]] -; GENERIC-NOOPT: fmov d0, [[COPY_REG3]] -; GENERIC-OPT-NOT: fmov +; GENERIC-NOT: fmov ; GENERIC: mov v0.d[1], [[COPY_REG2]] ; GENERIC-NEXT: ret %add = add <2 x i64> %a, %b Index: llvm/trunk/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll @@ -4,8 +4,10 @@ define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp { entry: ; CHECK-LABEL: t: -; CHECK: mov x0, [[REG1:x[0-9]+]] -; CHECK: mov x1, [[REG2:x[0-9]+]] +; CHECK: mov [[REG2:x[0-9]+]], x3 +; CHECK: mov [[REG1:x[0-9]+]], x2 +; CHECK: mov x0, x2 +; CHECK: mov x1, x3 ; CHECK: bl _foo ; CHECK: mov x0, [[REG1]] ; CHECK: mov x1, [[REG2]] Index: llvm/trunk/test/CodeGen/AArch64/cmpxchg-idioms.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/cmpxchg-idioms.ll +++ llvm/trunk/test/CodeGen/AArch64/cmpxchg-idioms.ll @@ -45,8 +45,7 @@ ; CHECK: [[FAILED]]: ; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}} -; CHECK: mov [[TMP:w[0-9]+]], wzr -; CHECK: eor w0, [[TMP]], #0x1 +; CHECK: eor w0, wzr, #0x1 ; CHECK: ret %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic Index: llvm/trunk/test/CodeGen/AArch64/copyprop.mir =================================================================== --- llvm/trunk/test/CodeGen/AArch64/copyprop.mir +++ llvm/trunk/test/CodeGen/AArch64/copyprop.mir @@ -0,0 +1,104 @@ +# RUN: llc -mtriple=aarch64-linux-gnu -run-pass machine-cp -o - %s | FileCheck %s +# Tests for MachineCopyPropagation copy forwarding. +--- +# Simple forwarding. +# CHECK-LABEL: name: test1 +# CHECK: $x0 = SUBXri $x0, 1, 0 +name: test1 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + renamable $x1 = COPY $x0 + $x0 = SUBXri renamable $x1, 1, 0 +... +--- +# Don't forward if not renamable. +# CHECK-LABEL: name: test2 +# CHECK: $x0 = SUBXri $x1, 1, 0 +name: test2 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + $x1 = COPY $x0 + $x0 = SUBXri $x1, 1, 0 +... +--- +# Don't forward reserved non-constant reg values. +# CHECK-LABEL: name: test4 +# CHECK: $x0 = SUBXri renamable $x1, 1, 0 +name: test4 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + $sp = SUBXri $sp, 16, 0 + renamable $x1 = COPY $sp + $x0 = SUBXri renamable $x1, 1, 0 + $sp = ADDXri $sp, 16, 0 +... +--- +# Don't violate opcode constraints when forwarding. +# CHECK-LABEL: name: test5 +# CHECK: $x0 = SUBXri renamable $x1, 1, 0 +name: test5 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + renamable $x1 = COPY $xzr + $x0 = SUBXri renamable $x1, 1, 0 +... +--- +# Test cross-class COPY forwarding. +# CHECK-LABEL: name: test6 +# CHECK: $x2 = COPY $x0 +name: test6 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + renamable $d1 = COPY $x0 + $x2 = COPY renamable $d1 + RET_ReallyLR implicit $x2 +... +--- +# Don't forward if there are overlapping implicit operands. 
+# CHECK-LABEL: name: test7 +# CHECK: $w0 = SUBWri killed renamable $w1, 1, 0 +name: test7 +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0 + renamable $w1 = COPY $w0 + $w0 = SUBWri killed renamable $w1, 1, 0, implicit killed $x1 +... +--- +# Check that kill flags are cleared. +# CHECK-LABEL: name: test8 +# CHECK: $x2 = ADDXri $x0, 1, 0 +# CHECK: $x0 = SUBXri $x0, 1, 0 +name: test8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + renamable $x1 = COPY $x0 + $x2 = ADDXri killed $x0, 1, 0 + $x0 = SUBXri renamable $x1, 1, 0 +... +--- +# Don't forward if value is clobbered. +# CHECK-LABEL: name: test9 +# CHECK: $x2 = SUBXri renamable $x1, 1, 0 +name: test9 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + renamable $x1 = COPY $x0 + $x0 = ADDXri $x0, 1, 0 + $x2 = SUBXri renamable $x1, 1, 0 +... Index: llvm/trunk/test/CodeGen/AArch64/f16-instructions.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/f16-instructions.ll +++ llvm/trunk/test/CodeGen/AArch64/f16-instructions.ll @@ -489,7 +489,7 @@ ; CHECK-COMMON-LABEL: test_phi: ; CHECK-COMMON: mov x[[PTR:[0-9]+]], x0 -; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x[[PTR]]] +; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x0] ; CHECK-COMMON: [[LOOP:LBB[0-9_]+]]: ; CHECK-COMMON: mov.16b v[[R:[0-9]+]], v[[AB]] ; CHECK-COMMON: ldr h[[AB]], [x[[PTR]]] Index: llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll +++ llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll @@ -17,6 +17,9 @@ %val = zext i1 %test to i32 ; CHECK: cset {{[xw][0-9]+}}, ne +; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]] +; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]] + store i32 %val, i32* @var call void @bar() @@ -25,7 +28,7 @@ ; Currently, the comparison is emitted again. An MSR/MRS pair would also be ; acceptable, but assuming the call preserves NZCV is not. 
br i1 %test, label %iftrue, label %iffalse -; CHECK: cmp [[LHS]], [[RHS]] +; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]] ; CHECK: b.eq iftrue: Index: llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll +++ llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll @@ -1671,7 +1671,7 @@ ; CHECK-LABEL: bug34674: ; CHECK: // %entry ; CHECK-NEXT: mov [[ZREG:x[0-9]+]], xzr -; CHECK-DAG: stp [[ZREG]], [[ZREG]], [x0] +; CHECK-DAG: stp xzr, xzr, [x0] ; CHECK-DAG: add x{{[0-9]+}}, [[ZREG]], #1 define i64 @bug34674(<2 x i64>* %p) { entry: Index: llvm/trunk/test/CodeGen/AArch64/merge-store-dependency.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/merge-store-dependency.ll +++ llvm/trunk/test/CodeGen/AArch64/merge-store-dependency.ll @@ -11,7 +11,7 @@ ; A53: mov [[DATA:w[0-9]+]], w1 ; A53: str q{{[0-9]+}}, {{.*}} ; A53: str q{{[0-9]+}}, {{.*}} -; A53: str [[DATA]], {{.*}} +; A53: str w1, {{.*}} %0 = bitcast %struct1* %fde to i8* tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 40, i1 false) Index: llvm/trunk/test/CodeGen/AArch64/neg-imm.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/neg-imm.ll +++ llvm/trunk/test/CodeGen/AArch64/neg-imm.ll @@ -7,8 +7,8 @@ define void @test(i32 %px) { ; CHECK_LABEL: test: ; CHECK_LABEL: %entry -; CHECK: subs -; CHECK-NEXT: csel +; CHECK: subs [[REG0:w[0-9]+]], +; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]] entry: %sub = add nsw i32 %px, -1 %cmp = icmp slt i32 %px, 1 Index: llvm/trunk/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll +++ llvm/trunk/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll @@ -547,16 +547,16 @@ ; GCN: s_mov_b32 s5, s32 ; GCN: s_add_u32 s32, s32, 0x300 -; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14 -; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15 -; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16 +; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-57-9][0-9]*]], s14 +; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-68-9][0-9]*]], s15 +; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-79][0-9]*]], s16 ; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7] ; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9] ; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11] -; GCN-DAG: s_mov_b32 s6, [[SAVE_X]] -; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]] -; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]] +; GCN-DAG: s_mov_b32 s6, s14 +; GCN-DAG: s_mov_b32 s7, s15 +; GCN-DAG: s_mov_b32 s8, s16 ; GCN: s_swappc_b64 ; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4 Index: llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir +++ llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir @@ -1,4 +1,4 @@ -# RUN: llc -march=amdgcn -start-after=greedy -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s +# RUN: llc -march=amdgcn -start-after=greedy -disable-copyprop -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s # Check that we first do all vector instructions and only then change exec # CHECK-DAG: COPY $vgpr10_vgpr11 # CHECK-DAG: COPY $vgpr12_vgpr13 Index: llvm/trunk/test/CodeGen/AMDGPU/multilevel-break.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/multilevel-break.ll +++ 
llvm/trunk/test/CodeGen/AMDGPU/multilevel-break.ll @@ -78,7 +78,7 @@ ; Uses a copy intsead of an or ; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]] -; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]] +; GCN: s_or_b64 [[BREAK_REG]], exec, [[BREAK_REG]] define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 { bb: %id = call i32 @llvm.amdgcn.workitem.id.x() Index: llvm/trunk/test/CodeGen/AMDGPU/ret.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/ret.ll +++ llvm/trunk/test/CodeGen/AMDGPU/ret.ll @@ -2,10 +2,10 @@ ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s ; GCN-LABEL: {{^}}vgpr: -; GCN: v_mov_b32_e32 v1, v0 -; GCN-DAG: v_add_f32_e32 v0, 1.0, v1 -; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm +; GCN-DAG: v_mov_b32_e32 v1, v0 +; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm ; GCN: s_waitcnt expcnt(0) +; GCN: v_add_f32_e32 v0, 1.0, v0 ; GCN-NOT: s_endpgm define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 { bb: @@ -204,13 +204,13 @@ } ; GCN-LABEL: {{^}}both: -; GCN: v_mov_b32_e32 v1, v0 -; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm -; GCN-DAG: v_add_f32_e32 v0, 1.0, v1 -; GCN-DAG: s_add_i32 s0, s3, 2 +; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm +; GCN-DAG: v_mov_b32_e32 v1, v0 ; GCN-DAG: s_mov_b32 s1, s2 -; GCN: s_mov_b32 s2, s3 ; GCN: s_waitcnt expcnt(0) +; GCN: v_add_f32_e32 v0, 1.0, v0 +; GCN-DAG: s_add_i32 s0, s3, 2 +; GCN-DAG: s_mov_b32 s2, s3 ; GCN-NOT: s_endpgm define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 { bb: Index: llvm/trunk/test/CodeGen/ARM/atomic-op.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/atomic-op.ll +++ llvm/trunk/test/CodeGen/ARM/atomic-op.ll @@ -287,7 +287,8 @@ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic %oldval = extractvalue { i32, i1 } %pair, 0 -; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]] +; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0 +; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0] ; CHECK-ARMV7: cmp [[OLDVAL]], r1 ; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]] ; CHECK-ARMV7: dmb ish @@ -305,7 +306,8 @@ ; CHECK-ARMV7: dmb ish ; CHECK-ARMV7: bx lr -; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]] +; CHECK-T2: mov r[[ADDR:[0-9]+]], r0 +; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0] ; CHECK-T2: cmp [[OLDVAL]], r1 ; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]] ; CHECK-T2: dmb ish Index: llvm/trunk/test/CodeGen/ARM/intrinsics-overflow.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/intrinsics-overflow.ll +++ llvm/trunk/test/CodeGen/ARM/intrinsics-overflow.ll @@ -39,7 +39,7 @@ ; ARM: mov pc, lr ; THUMBV6: mov r[[R2:[0-9]+]], r[[R0:[0-9]+]] - ; THUMBV6: adds r[[R3:[0-9]+]], r[[R2]], r[[R1:[0-9]+]] + ; THUMBV6: adds r[[R3:[0-9]+]], r[[R0]], r[[R1:[0-9]+]] ; THUMBV6: movs r[[R0]], #0 ; THUMBV6: movs r[[R1]], #1 ; THUMBV6: cmp r[[R3]], r[[R2]] Index: llvm/trunk/test/CodeGen/ARM/select-imm.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/select-imm.ll +++ llvm/trunk/test/CodeGen/ARM/select-imm.ll @@ -197,9 +197,9 @@ ; ARMT2-LABEL: t8: ; ARMT2: mov r1, r0 +; ARMT2: cmp r0, #5 ; ARMT2: mov r0, #9 ; ARMT2: mov r4, #0 -; ARMT2: cmp r1, #5 ; ARMT2: movweq r4, #1 ; ARMT2: bl t7 @@ -213,8 +213,8 @@ ; 
THUMB2-LABEL: t8: ; THUMB2: mov r1, r0 -; THUMB2: movs r4, #0 -; THUMB2: cmp r1, #5 +; THUMB2: cmp r0, #5 +; THUMB2: mov.w r4, #0 ; THUMB2: it eq ; THUMB2: moveq r4, #1 %cmp = icmp eq i32 %a, 5 Index: llvm/trunk/test/CodeGen/ARM/swifterror.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/swifterror.ll +++ llvm/trunk/test/CodeGen/ARM/swifterror.ll @@ -182,7 +182,7 @@ ; CHECK-APPLE: beq ; CHECK-APPLE: mov r0, #16 ; CHECK-APPLE: malloc -; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8] +; CHECK-APPLE: strb r{{.*}}, [r0, #8] ; CHECK-APPLE: ble ; CHECK-APPLE: mov r8, [[ID]] Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/ashr.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/llvm-ir/ashr.ll +++ llvm/trunk/test/CodeGen/Mips/llvm-ir/ashr.ll @@ -800,7 +800,7 @@ ; MMR3-NEXT: sw $5, 36($sp) # 4-byte Folded Spill ; MMR3-NEXT: sw $4, 8($sp) # 4-byte Folded Spill ; MMR3-NEXT: lw $16, 76($sp) -; MMR3-NEXT: srlv $4, $8, $16 +; MMR3-NEXT: srlv $4, $7, $16 ; MMR3-NEXT: not16 $3, $16 ; MMR3-NEXT: sw $3, 24($sp) # 4-byte Folded Spill ; MMR3-NEXT: sll16 $2, $6, 1 @@ -890,7 +890,7 @@ ; MMR6-NEXT: lw $3, 68($sp) ; MMR6-NEXT: li16 $2, 64 ; MMR6-NEXT: subu16 $7, $2, $3 -; MMR6-NEXT: sllv $8, $6, $7 +; MMR6-NEXT: sllv $8, $5, $7 ; MMR6-NEXT: andi16 $5, $7, 32 ; MMR6-NEXT: selnez $9, $8, $5 ; MMR6-NEXT: sllv $16, $4, $7 Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/lshr.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/llvm-ir/lshr.ll +++ llvm/trunk/test/CodeGen/Mips/llvm-ir/lshr.ll @@ -828,7 +828,7 @@ ; MMR3-NEXT: move $17, $5 ; MMR3-NEXT: sw $4, 8($sp) # 4-byte Folded Spill ; MMR3-NEXT: lw $16, 76($sp) -; MMR3-NEXT: srlv $7, $8, $16 +; MMR3-NEXT: srlv $7, $7, $16 ; MMR3-NEXT: not16 $3, $16 ; MMR3-NEXT: sw $3, 24($sp) # 4-byte Folded Spill ; MMR3-NEXT: sll16 $2, $6, 1 @@ -915,14 +915,14 @@ ; MMR6-NEXT: move $1, $7 ; MMR6-NEXT: sw $5, 8($sp) # 4-byte Folded Spill ; MMR6-NEXT: move $16, $4 -; MMR6-NEXT: sw $16, 32($sp) # 4-byte Folded Spill +; MMR6-NEXT: sw $4, 32($sp) # 4-byte Folded Spill ; MMR6-NEXT: lw $3, 76($sp) -; MMR6-NEXT: srlv $2, $1, $3 +; MMR6-NEXT: srlv $2, $7, $3 ; MMR6-NEXT: not16 $5, $3 ; MMR6-NEXT: sw $5, 24($sp) # 4-byte Folded Spill ; MMR6-NEXT: move $4, $6 -; MMR6-NEXT: sw $4, 28($sp) # 4-byte Folded Spill -; MMR6-NEXT: sll16 $6, $4, 1 +; MMR6-NEXT: sw $6, 28($sp) # 4-byte Folded Spill +; MMR6-NEXT: sll16 $6, $6, 1 ; MMR6-NEXT: sllv $17, $6, $5 ; MMR6-NEXT: or16 $17, $2 ; MMR6-NEXT: addiu $7, $3, -64 @@ -956,7 +956,7 @@ ; MMR6-NEXT: sw $7, 4($sp) # 4-byte Folded Spill ; MMR6-NEXT: not16 $6, $6 ; MMR6-NEXT: move $7, $17 -; MMR6-NEXT: srl16 $17, $7, 1 +; MMR6-NEXT: srl16 $17, $17, 1 ; MMR6-NEXT: srlv $6, $17, $6 ; MMR6-NEXT: lw $17, 4($sp) # 4-byte Folded Reload ; MMR6-NEXT: or16 $6, $17 Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/shl.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/llvm-ir/shl.ll +++ llvm/trunk/test/CodeGen/Mips/llvm-ir/shl.ll @@ -857,7 +857,7 @@ ; MMR3-NEXT: sw $5, 32($sp) # 4-byte Folded Spill ; MMR3-NEXT: move $1, $4 ; MMR3-NEXT: lw $16, 76($sp) -; MMR3-NEXT: sllv $2, $1, $16 +; MMR3-NEXT: sllv $2, $4, $16 ; MMR3-NEXT: not16 $4, $16 ; MMR3-NEXT: sw $4, 24($sp) # 4-byte Folded Spill ; MMR3-NEXT: srl16 $3, $5, 1 @@ -946,7 +946,7 @@ ; MMR6-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MMR6-NEXT: move $1, $4 ; MMR6-NEXT: lw $3, 60($sp) -; MMR6-NEXT: sllv $2, $1, $3 +; MMR6-NEXT: 
sllv $2, $4, $3 ; MMR6-NEXT: not16 $4, $3 ; MMR6-NEXT: sw $4, 16($sp) # 4-byte Folded Spill ; MMR6-NEXT: sw $5, 20($sp) # 4-byte Folded Spill Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/sub.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/llvm-ir/sub.ll +++ llvm/trunk/test/CodeGen/Mips/llvm-ir/sub.ll @@ -163,7 +163,7 @@ ; MMR3: subu16 $5, $[[T19]], $[[T20]] ; MMR6: move $[[T0:[0-9]+]], $7 -; MMR6: sw $[[T0]], 8($sp) +; MMR6: sw $7, 8($sp) ; MMR6: move $[[T1:[0-9]+]], $5 ; MMR6: sw $4, 12($sp) ; MMR6: lw $[[T2:[0-9]+]], 48($sp) Index: llvm/trunk/test/CodeGen/PowerPC/MCSE-caller-preserved-reg.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/MCSE-caller-preserved-reg.ll +++ llvm/trunk/test/CodeGen/PowerPC/MCSE-caller-preserved-reg.ll @@ -20,9 +20,9 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset lr, 16 ; CHECK-NEXT: .cfi_offset r30, -16 +; CHECK-NEXT: ld 12, 0(3) ; CHECK-NEXT: std 30, 32(1) ; CHECK-NEXT: mr 30, 3 -; CHECK-NEXT: ld 12, 0(30) ; CHECK-NEXT: std 2, 24(1) ; CHECK-NEXT: mtctr 12 ; CHECK-NEXT: bctrl Index: llvm/trunk/test/CodeGen/PowerPC/fma-mutate.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/fma-mutate.ll +++ llvm/trunk/test/CodeGen/PowerPC/fma-mutate.ll @@ -14,7 +14,8 @@ ret double %r ; CHECK: @foo3 -; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]] +; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]] +; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]] ; CHECK: xsmaddmdp ; CHECK: xsmaddadp } Index: llvm/trunk/test/CodeGen/PowerPC/gpr-vsr-spill.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/gpr-vsr-spill.ll +++ llvm/trunk/test/CodeGen/PowerPC/gpr-vsr-spill.ll @@ -16,8 +16,8 @@ ret i32 %e.0 ; CHECK: @foo ; CHECK: mr [[NEWREG:[0-9]+]], 3 +; CHECK: mr [[REG1:[0-9]+]], 4 ; CHECK: mtvsrd [[NEWREG2:[0-9]+]], 4 -; CHECK: mffprd [[REG1:[0-9]+]], [[NEWREG2]] ; CHECK: add {{[0-9]+}}, [[NEWREG]], [[REG1]] ; CHECK: mffprd [[REG2:[0-9]+]], [[NEWREG2]] ; CHECK: add {{[0-9]+}}, [[REG2]], [[NEWREG]] Index: llvm/trunk/test/CodeGen/PowerPC/licm-remat.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/licm-remat.ll +++ llvm/trunk/test/CodeGen/PowerPC/licm-remat.ll @@ -20,8 +20,8 @@ define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) { ; CHECK-LABEL: ZN6snappyDecompressor_: ; CHECK: # %bb.0: # %entry -; CHECK: addis 3, 2, _ZN6snappy8internalL8wordmaskE@toc@ha -; CHECK-DAG: addi 25, 3, _ZN6snappy8internalL8wordmaskE@toc@l +; CHECK: addis 23, 2, _ZN6snappy8internalL8wordmaskE@toc@ha +; CHECK-DAG: addi 25, 23, _ZN6snappy8internalL8wordmaskE@toc@l ; CHECK-DAG: addis 5, 2, _ZN6snappy8internalL10char_tableE@toc@ha ; CHECK-DAG: addi 24, 5, _ZN6snappy8internalL10char_tableE@toc@l ; CHECK: b .LBB0_2 Index: llvm/trunk/test/CodeGen/PowerPC/opt-li-add-to-addi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/opt-li-add-to-addi.ll +++ llvm/trunk/test/CodeGen/PowerPC/opt-li-add-to-addi.ll @@ -3,7 +3,7 @@ define i64 @testOptimizeLiAddToAddi(i64 %a) { ; CHECK-LABEL: testOptimizeLiAddToAddi: -; CHECK: addi 3, 30, 2444 +; CHECK: addi 3, 3, 2444 ; CHECK: bl callv ; CHECK: addi 3, 30, 234 ; CHECK: bl call Index: llvm/trunk/test/CodeGen/PowerPC/tail-dup-layout.ll 
=================================================================== --- llvm/trunk/test/CodeGen/PowerPC/tail-dup-layout.ll +++ llvm/trunk/test/CodeGen/PowerPC/tail-dup-layout.ll @@ -25,7 +25,7 @@ ;CHECK-LABEL: straight_test: ; test1 may have been merged with entry ;CHECK: mr [[TAGREG:[0-9]+]], 3 -;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1 +;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1 ;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]] ;CHECK-NEXT: # %test2 ;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30 Index: llvm/trunk/test/CodeGen/SPARC/32abi.ll =================================================================== --- llvm/trunk/test/CodeGen/SPARC/32abi.ll +++ llvm/trunk/test/CodeGen/SPARC/32abi.ll @@ -156,9 +156,9 @@ ; HARD-NEXT: std %o0, [%sp+96] ; HARD-NEXT: st %o1, [%sp+92] ; HARD-NEXT: mov %i0, %o2 -; HARD-NEXT: mov %o0, %o3 +; HARD-NEXT: mov %i1, %o3 ; HARD-NEXT: mov %o1, %o4 -; HARD-NEXT: mov %o0, %o5 +; HARD-NEXT: mov %i1, %o5 ; HARD-NEXT: call floatarg ; HARD: std %f0, [%i4] ; SOFT: st %i0, [%sp+104] Index: llvm/trunk/test/CodeGen/SPARC/atomics.ll =================================================================== --- llvm/trunk/test/CodeGen/SPARC/atomics.ll +++ llvm/trunk/test/CodeGen/SPARC/atomics.ll @@ -235,8 +235,9 @@ ; CHECK-LABEL: test_load_add_i32 ; CHECK: membar -; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]] -; CHECK: cas [%o0], [[V]], [[U]] +; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]] +; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]] +; CHECK: cas [%o0], [[V]], [[V2]] ; CHECK: membar define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) { entry: Index: llvm/trunk/test/CodeGen/SystemZ/vec-sub-01.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/vec-sub-01.ll +++ llvm/trunk/test/CodeGen/SystemZ/vec-sub-01.ll @@ -46,12 +46,12 @@ ; CHECK-LABEL: f5: ; CHECK-DAG: vlr %v[[A1:[0-5]]], %v24 ; CHECK-DAG: vlr %v[[A2:[0-5]]], %v26 -; CHECK-DAG: vrepf %v[[B1:[0-5]]], %v[[A1]], 1 -; CHECK-DAG: vrepf %v[[B2:[0-5]]], %v[[A2]], 1 -; CHECK-DAG: vrepf %v[[C1:[0-5]]], %v[[A1]], 2 -; CHECK-DAG: vrepf %v[[C2:[0-5]]], %v[[A2]], 2 -; CHECK-DAG: vrepf %v[[D1:[0-5]]], %v[[A1]], 3 -; CHECK-DAG: vrepf %v[[D2:[0-5]]], %v[[A2]], 3 +; CHECK-DAG: vrepf %v[[B1:[0-5]]], %v24, 1 +; CHECK-DAG: vrepf %v[[B2:[0-5]]], %v26, 1 +; CHECK-DAG: vrepf %v[[C1:[0-5]]], %v24, 2 +; CHECK-DAG: vrepf %v[[C2:[0-5]]], %v26, 2 +; CHECK-DAG: vrepf %v[[D1:[0-5]]], %v24, 3 +; CHECK-DAG: vrepf %v[[D2:[0-5]]], %v26, 3 ; CHECK-DAG: sebr %f[[A1]], %f[[A2]] ; CHECK-DAG: sebr %f[[B1]], %f[[B2]] ; CHECK-DAG: sebr %f[[C1]], %f[[C2]] Index: llvm/trunk/test/CodeGen/Thumb/pr35836.ll =================================================================== --- llvm/trunk/test/CodeGen/Thumb/pr35836.ll +++ llvm/trunk/test/CodeGen/Thumb/pr35836.ll @@ -37,13 +37,13 @@ ; CHECK: adds r3, r0, r1 ; CHECK: push {r5} ; CHECK: pop {r1} -; CHECK: adcs r1, r1 +; CHECK: adcs r1, r5 ; CHECK: ldr r0, [sp, #12] @ 4-byte Reload ; CHECK: ldr r2, [sp, #8] @ 4-byte Reload ; CHECK: adds r2, r0, r2 ; CHECK: push {r5} ; CHECK: pop {r4} -; CHECK: adcs r4, r4 +; CHECK: adcs r4, r5 ; CHECK: adds r0, r2, r5 ; CHECK: push {r3} ; CHECK: pop {r0} Index: llvm/trunk/test/CodeGen/Thumb/thumb-shrink-wrapping.ll =================================================================== --- llvm/trunk/test/CodeGen/Thumb/thumb-shrink-wrapping.ll +++ llvm/trunk/test/CodeGen/Thumb/thumb-shrink-wrapping.ll @@ -598,7 +598,7 @@ define i32 @b_to_bx(i32 %value) { ; CHECK-LABEL: b_to_bx: ; DISABLE: 
push {r7, lr} -; CHECK: cmp r1, #49 +; CHECK: cmp r0, #49 ; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]] ; ENABLE: push {r7, lr} Index: llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll +++ llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll @@ -7,7 +7,7 @@ ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ; CHECK-NEXT: movl %ecx, %edx -; CHECK-NEXT: imull %edx, %edx +; CHECK-NEXT: imull %ecx, %edx ; CHECK-NEXT: imull %eax, %ecx ; CHECK-NEXT: imull %eax, %eax ; CHECK-NEXT: addl %edx, %eax Index: llvm/trunk/test/CodeGen/X86/arg-copy-elide.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/arg-copy-elide.ll +++ llvm/trunk/test/CodeGen/X86/arg-copy-elide.ll @@ -106,7 +106,7 @@ ; CHECK-DAG: movl %edx, %[[r1:[^ ]*]] ; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]] ; CHECK-DAG: movl %[[r2]], 4(%esp) -; CHECK-DAG: movl %[[r1]], (%esp) +; CHECK-DAG: movl %edx, (%esp) ; CHECK: movl %esp, %[[reg:[^ ]*]] ; CHECK: pushl %[[reg]] ; CHECK: calll _addrof_i64 Index: llvm/trunk/test/CodeGen/X86/avx-load-store.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx-load-store.ll +++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll @@ -12,11 +12,11 @@ ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movq %rsi, %r15 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: vmovaps (%rbx), %ymm0 +; CHECK-NEXT: vmovaps (%rdi), %ymm0 ; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill -; CHECK-NEXT: vmovaps (%r15), %ymm1 +; CHECK-NEXT: vmovaps (%rsi), %ymm1 ; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill -; CHECK-NEXT: vmovaps (%r14), %ymm2 +; CHECK-NEXT: vmovaps (%rdx), %ymm2 ; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill ; CHECK-NEXT: callq dummy ; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload Index: llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll +++ llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll @@ -9,10 +9,10 @@ ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: subq $112, %rsp ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: vmovups (%rbx), %zmm0 +; CHECK-NEXT: vmovups (%rdi), %zmm0 ; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill ; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1 -; CHECK-NEXT: vmovaps %zmm1, (%rbx) +; CHECK-NEXT: vmovaps %zmm1, (%rdi) ; CHECK-NEXT: callq _Print__512 ; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload ; CHECK-NEXT: callq _Print__512 Index: llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll +++ llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll @@ -355,7 +355,7 @@ ; KNL_X32-NEXT: movl %edi, (%esp) ; KNL_X32-NEXT: calll _test11 ; KNL_X32-NEXT: movl %eax, %ebx -; KNL_X32-NEXT: movzbl %bl, %eax +; KNL_X32-NEXT: movzbl %al, %eax ; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp) ; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp) ; KNL_X32-NEXT: movl %edi, (%esp) Index: llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll +++ llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll @@ -650,7 +650,7 @@ ; X32-NEXT: subl $24, %esp ; X32-NEXT: vmovups %xmm4, 
(%esp) # 16-byte Spill ; X32-NEXT: vmovdqa %xmm0, %xmm4 -; X32-NEXT: vmovdqa %xmm4, %xmm1 +; X32-NEXT: vmovdqa %xmm0, %xmm1 ; X32-NEXT: calll _test_argRet128Vector ; X32-NEXT: vmovdqa32 %xmm4, %xmm0 {%k1} ; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload @@ -668,7 +668,7 @@ ; WIN64-NEXT: .seh_savexmm 8, 0 ; WIN64-NEXT: .seh_endprologue ; WIN64-NEXT: vmovdqa %xmm0, %xmm8 -; WIN64-NEXT: vmovdqa %xmm8, %xmm1 +; WIN64-NEXT: vmovdqa %xmm0, %xmm1 ; WIN64-NEXT: callq test_argRet128Vector ; WIN64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1} ; WIN64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload @@ -689,7 +689,7 @@ ; LINUXOSX64-NEXT: .cfi_offset %rsp, -16 ; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32 ; LINUXOSX64-NEXT: vmovdqa %xmm0, %xmm8 -; LINUXOSX64-NEXT: vmovdqa %xmm8, %xmm1 +; LINUXOSX64-NEXT: vmovdqa %xmm0, %xmm1 ; LINUXOSX64-NEXT: callq test_argRet128Vector ; LINUXOSX64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1} ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload @@ -908,12 +908,12 @@ ; X32-NEXT: subl $20, %esp ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %edi, %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %edx, %ebx -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill +; X32-NEXT: movl %edx, (%esp) # 4-byte Spill ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %eax, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: subl %ecx, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, %ebp Index: llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll +++ llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll @@ -202,7 +202,7 @@ ; X32-NEXT: movl %edx, %ecx ; X32-NEXT: shrl $12, %ecx ; X32-NEXT: kmovd %ecx, %k3 -; X32-NEXT: movl %esi, %ecx +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: shrl $13, %ecx ; X32-NEXT: andb $1, %cl ; X32-NEXT: kmovd %ecx, %k4 @@ -340,7 +340,7 @@ ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $28, %k0, %k1 ; X32-NEXT: movl %esi, %ecx -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $28, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -348,7 +348,7 @@ ; X32-NEXT: kshiftrq $35, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $29, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $29, %eax ; X32-NEXT: andb $1, %al ; X32-NEXT: kmovd %eax, %k2 @@ -357,7 +357,7 @@ ; X32-NEXT: kshiftrq $34, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $30, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $30, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -751,7 +751,7 @@ ; X32-NEXT: movl %edx, %ecx ; X32-NEXT: shrl $12, %ecx ; X32-NEXT: kmovd %ecx, %k3 -; X32-NEXT: movl %esi, %ecx +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: shrl $13, %ecx ; X32-NEXT: andb $1, %cl ; X32-NEXT: kmovd %ecx, %k4 @@ -889,7 +889,7 @@ ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $28, %k0, %k1 ; X32-NEXT: movl %esi, %ecx -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $28, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -897,7 +897,7 @@ ; X32-NEXT: kshiftrq $35, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $29, %k0, %k1 -; X32-NEXT: movl 
%ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $29, %eax ; X32-NEXT: andb $1, %al ; X32-NEXT: kmovd %eax, %k2 @@ -906,7 +906,7 @@ ; X32-NEXT: kshiftrq $34, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $30, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $30, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -1781,7 +1781,7 @@ ; X32-NEXT: movl %edx, %ecx ; X32-NEXT: shrl $12, %ecx ; X32-NEXT: kmovd %ecx, %k3 -; X32-NEXT: movl %esi, %ecx +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: shrl $13, %ecx ; X32-NEXT: andb $1, %cl ; X32-NEXT: kmovd %ecx, %k4 @@ -1919,7 +1919,7 @@ ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $28, %k0, %k1 ; X32-NEXT: movl %esi, %ecx -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $28, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -1927,7 +1927,7 @@ ; X32-NEXT: kshiftrq $35, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $29, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $29, %eax ; X32-NEXT: andb $1, %al ; X32-NEXT: kmovd %eax, %k2 @@ -1936,7 +1936,7 @@ ; X32-NEXT: kshiftrq $34, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $30, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $30, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -2423,7 +2423,7 @@ ; X32-NEXT: movl %edx, %ecx ; X32-NEXT: shrl $12, %ecx ; X32-NEXT: kmovd %ecx, %k3 -; X32-NEXT: movl %esi, %ecx +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: shrl $13, %ecx ; X32-NEXT: andb $1, %cl ; X32-NEXT: kmovd %ecx, %k4 @@ -2561,7 +2561,7 @@ ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $28, %k0, %k1 ; X32-NEXT: movl %esi, %ecx -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $28, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 @@ -2569,7 +2569,7 @@ ; X32-NEXT: kshiftrq $35, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $29, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $29, %eax ; X32-NEXT: andb $1, %al ; X32-NEXT: kmovd %eax, %k2 @@ -2578,7 +2578,7 @@ ; X32-NEXT: kshiftrq $34, %k1, %k1 ; X32-NEXT: kxorq %k1, %k0, %k0 ; X32-NEXT: kshiftrq $30, %k0, %k1 -; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: shrl $30, %eax ; X32-NEXT: kmovd %eax, %k2 ; X32-NEXT: kxorq %k2, %k1, %k1 Index: llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll +++ llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll @@ -1876,7 +1876,7 @@ ; AVX512F-32-NEXT: kshiftrq $5, %k7, %k0 ; AVX512F-32-NEXT: kxorq %k4, %k0, %k4 ; AVX512F-32-NEXT: kmovd %ecx, %k0 -; AVX512F-32-NEXT: movl %ebp, %ecx +; AVX512F-32-NEXT: movl %ebx, %ecx ; AVX512F-32-NEXT: shrl $13, %ecx ; AVX512F-32-NEXT: andb $1, %cl ; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4 @@ -2576,7 +2576,7 @@ ; AVX512F-32-NEXT: kshiftrq $5, %k7, %k0 ; AVX512F-32-NEXT: kxorq %k4, %k0, %k4 ; AVX512F-32-NEXT: kmovd %ecx, %k0 -; AVX512F-32-NEXT: movl %ebp, %ecx +; AVX512F-32-NEXT: movl %ebx, %ecx ; AVX512F-32-NEXT: shrl $13, %ecx ; AVX512F-32-NEXT: andb $1, %cl ; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4 Index: llvm/trunk/test/CodeGen/X86/buildvec-insertvec.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/buildvec-insertvec.ll +++ 
llvm/trunk/test/CodeGen/X86/buildvec-insertvec.ll @@ -38,7 +38,7 @@ ; SSE2-LABEL: test_negative_zero_1: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movaps %xmm0, %xmm1 -; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE2-NEXT: xorps %xmm2, %xmm2 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3] ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero Index: llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll +++ llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll @@ -197,8 +197,8 @@ ; SSE-NEXT: cvtss2sd %xmm2, %xmm4 ; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3] ; SSE-NEXT: movaps %xmm2, %xmm6 -; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3] ; SSE-NEXT: movaps {{.*#+}} xmm7 ; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: andps %xmm7, %xmm2 @@ -213,7 +213,7 @@ ; SSE-NEXT: orps %xmm0, %xmm4 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0] ; SSE-NEXT: movaps %xmm1, %xmm0 -; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] ; SSE-NEXT: andps %xmm7, %xmm0 ; SSE-NEXT: cvtss2sd %xmm3, %xmm3 ; SSE-NEXT: andps %xmm8, %xmm3 @@ -260,7 +260,7 @@ ; SSE-NEXT: orps %xmm6, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE-NEXT: movaps %xmm3, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1] ; SSE-NEXT: andps %xmm5, %xmm1 ; SSE-NEXT: xorps %xmm6, %xmm6 ; SSE-NEXT: cvtsd2ss %xmm2, %xmm6 Index: llvm/trunk/test/CodeGen/X86/combine-shl.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/combine-shl.ll +++ llvm/trunk/test/CodeGen/X86/combine-shl.ll @@ -204,7 +204,7 @@ ; SSE-LABEL: combine_vec_shl_ext_shl0: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE-NEXT: pslld $20, %xmm1 ; SSE-NEXT: pslld $20, %xmm0 Index: llvm/trunk/test/CodeGen/X86/complex-fastmath.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/complex-fastmath.ll +++ llvm/trunk/test/CodeGen/X86/complex-fastmath.ll @@ -14,7 +14,7 @@ ; SSE: # %bb.0: ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: addss %xmm2, %xmm2 +; SSE-NEXT: addss %xmm0, %xmm2 ; SSE-NEXT: mulss %xmm1, %xmm2 ; SSE-NEXT: mulss %xmm0, %xmm0 ; SSE-NEXT: mulss %xmm1, %xmm1 @@ -58,9 +58,9 @@ ; SSE-LABEL: complex_square_f64: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: addsd %xmm2, %xmm2 +; SSE-NEXT: addsd %xmm0, %xmm2 ; SSE-NEXT: mulsd %xmm1, %xmm2 ; SSE-NEXT: mulsd %xmm0, %xmm0 ; SSE-NEXT: mulsd %xmm1, %xmm1 @@ -161,9 +161,9 @@ ; SSE-LABEL: complex_mul_f64: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: movhlps 
{{.*#+}} xmm3 = xmm3[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: mulsd %xmm0, %xmm4 ; SSE-NEXT: mulsd %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/divide-by-constant.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/divide-by-constant.ll +++ llvm/trunk/test/CodeGen/X86/divide-by-constant.ll @@ -312,7 +312,7 @@ ; X64: # %bb.0: # %entry ; X64-NEXT: movq %rdi, %rcx ; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F -; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %rdx ; X64-NEXT: shrq $12, %rdx ; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039 Index: llvm/trunk/test/CodeGen/X86/fmaxnum.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/fmaxnum.ll +++ llvm/trunk/test/CodeGen/X86/fmaxnum.ll @@ -18,7 +18,7 @@ ; CHECK-LABEL: @test_fmaxf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: maxss %xmm0, %xmm1 @@ -47,7 +47,7 @@ ; CHECK-LABEL: @test_fmax ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: maxsd %xmm0, %xmm1 @@ -74,7 +74,7 @@ ; CHECK-LABEL: @test_intrinsic_fmaxf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: maxss %xmm0, %xmm1 @@ -95,7 +95,7 @@ ; CHECK-LABEL: @test_intrinsic_fmax ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: maxsd %xmm0, %xmm1 Index: llvm/trunk/test/CodeGen/X86/fmf-flags.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/fmf-flags.ll +++ llvm/trunk/test/CodeGen/X86/fmf-flags.ll @@ -30,7 +30,7 @@ ; X64-LABEL: fast_fmuladd_opts: ; X64: # %bb.0: ; X64-NEXT: movaps %xmm0, %xmm1 -; X64-NEXT: addss %xmm1, %xmm1 +; X64-NEXT: addss %xmm0, %xmm1 ; X64-NEXT: addss %xmm0, %xmm1 ; X64-NEXT: movaps %xmm1, %xmm0 ; X64-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/fminnum.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/fminnum.ll +++ llvm/trunk/test/CodeGen/X86/fminnum.ll @@ -18,7 +18,7 @@ ; CHECK-LABEL: @test_fminf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: minss %xmm0, %xmm1 @@ -40,7 +40,7 @@ ; CHECK-LABEL: @test_fmin ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: minsd %xmm0, %xmm1 @@ -67,7 +67,7 @@ ; CHECK-LABEL: @test_intrinsic_fminf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: minss %xmm0, %xmm1 @@ -87,7 +87,7 @@ ; CHECK-LABEL: @test_intrinsic_fmin ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: minsd %xmm0, %xmm1 Index: 
llvm/trunk/test/CodeGen/X86/fp128-i128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/fp128-i128.ll +++ llvm/trunk/test/CodeGen/X86/fp128-i128.ll @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: movaps %xmm0, %xmm1 -; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) +; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq $0, (%rsp) @@ -275,7 +275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: movaps %xmm0, %xmm1 -; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) +; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq $0, (%rsp) Index: llvm/trunk/test/CodeGen/X86/h-registers-1.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/h-registers-1.ll +++ llvm/trunk/test/CodeGen/X86/h-registers-1.ll @@ -32,8 +32,7 @@ ; CHECK-NEXT: movzbl %ah, %eax ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ebx ; CHECK-NEXT: movzbl %bh, %edi -; CHECK-NEXT: movq %r10, %r8 -; CHECK-NEXT: addq %r8, %rsi +; CHECK-NEXT: addq %r10, %rsi ; CHECK-NEXT: addq %r11, %rdx ; CHECK-NEXT: addq %rsi, %rdx ; CHECK-NEXT: addq %rbp, %rcx @@ -68,8 +67,7 @@ ; GNUX32-NEXT: movzbl %ah, %eax ; GNUX32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; GNUX32-NEXT: movzbl %bh, %edi -; GNUX32-NEXT: movq %r10, %r8 -; GNUX32-NEXT: addq %r8, %rsi +; GNUX32-NEXT: addq %r10, %rsi ; GNUX32-NEXT: addq %r11, %rdx ; GNUX32-NEXT: addq %rsi, %rdx ; GNUX32-NEXT: addq %rbp, %rcx Index: llvm/trunk/test/CodeGen/X86/haddsub-2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/haddsub-2.ll +++ llvm/trunk/test/CodeGen/X86/haddsub-2.ll @@ -896,16 +896,16 @@ ; SSE-LABEL: not_a_hsub_2: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: subss %xmm3, %xmm2 ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3] ; SSE-NEXT: subss %xmm3, %xmm0 ; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3] ; SSE-NEXT: movaps %xmm1, %xmm4 -; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1] ; SSE-NEXT: subss %xmm4, %xmm3 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3] ; SSE-NEXT: subss %xmm4, %xmm1 @@ -953,10 +953,10 @@ ; SSE-LABEL: not_a_hsub_3: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1] ; SSE-NEXT: subsd %xmm2, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: subsd %xmm0, %xmm2 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movapd %xmm2, %xmm0 Index: llvm/trunk/test/CodeGen/X86/haddsub-3.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/haddsub-3.ll +++ llvm/trunk/test/CodeGen/X86/haddsub-3.ll @@ -7,10 +7,10 @@ ; SSE2-LABEL: pr26491: ; SSE2: # %bb.0: ; SSE2-NEXT: movaps %xmm0, %xmm1 -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: shufps 
{{.*#+}} xmm1 = xmm1[1,1],xmm0[3,3] ; SSE2-NEXT: addps %xmm0, %xmm1 ; SSE2-NEXT: movaps %xmm1, %xmm0 -; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] ; SSE2-NEXT: addss %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -19,7 +19,7 @@ ; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSSE3-NEXT: addps %xmm0, %xmm1 ; SSSE3-NEXT: movaps %xmm1, %xmm0 -; SSSE3-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; SSSE3-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] ; SSSE3-NEXT: addss %xmm1, %xmm0 ; SSSE3-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/haddsub-undef.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/haddsub-undef.ll +++ llvm/trunk/test/CodeGen/X86/haddsub-undef.ll @@ -103,7 +103,7 @@ ; SSE-LABEL: test5_undef: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE-NEXT: addsd %xmm0, %xmm1 ; SSE-NEXT: movapd %xmm1, %xmm0 ; SSE-NEXT: retq @@ -168,7 +168,7 @@ ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSE-NEXT: addss %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] ; SSE-NEXT: addss %xmm2, %xmm0 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] Index: llvm/trunk/test/CodeGen/X86/half.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/half.ll +++ llvm/trunk/test/CodeGen/X86/half.ll @@ -386,7 +386,7 @@ ; CHECK-LIBCALL-NEXT: pushq %rbx ; CHECK-LIBCALL-NEXT: subq $48, %rsp ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx -; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi +; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee ; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill ; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi @@ -472,7 +472,7 @@ ; CHECK-LIBCALL-NEXT: pushq %rbx ; CHECK-LIBCALL-NEXT: subq $16, %rsp ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx -; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi +; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee ; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill ; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi @@ -657,7 +657,7 @@ ; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill ; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp ; CHECK-I686-NEXT: movaps %xmm0, %xmm1 -; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] +; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] ; CHECK-I686-NEXT: movss %xmm1, (%esp) ; CHECK-I686-NEXT: calll __gnu_f2h_ieee ; CHECK-I686-NEXT: movw %ax, %si Index: llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll +++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll @@ -40,7 +40,7 @@ ; X86-SSE42-LABEL: test_reduce_v2i64: ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm0 ; X86-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2 ; X86-SSE42-NEXT: movd %xmm2, %eax @@ -80,7 +80,7 @@ ; X64-SSE42-LABEL: test_reduce_v2i64: ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = 
xmm0[2,3,0,1] ; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm0 ; X64-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2 ; X64-SSE42-NEXT: movq %xmm2, %rax @@ -1061,7 +1061,7 @@ ; X86-SSE2-NEXT: subl $28, %esp ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm5 -; X86-SSE2-NEXT: movdqa %xmm5, (%esp) ## 16-byte Spill +; X86-SSE2-NEXT: movdqa %xmm3, (%esp) ## 16-byte Spill ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 @@ -1079,7 +1079,7 @@ ; X86-SSE2-NEXT: por %xmm6, %xmm5 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm6 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6 -; X86-SSE2-NEXT: movdqa %xmm1, %xmm7 +; X86-SSE2-NEXT: movdqa %xmm0, %xmm7 ; X86-SSE2-NEXT: pxor %xmm4, %xmm7 ; X86-SSE2-NEXT: movdqa %xmm7, %xmm0 ; X86-SSE2-NEXT: pcmpgtd %xmm6, %xmm0 @@ -1134,7 +1134,7 @@ ; X86-SSE42-LABEL: test_reduce_v8i64: ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: movdqa %xmm0, %xmm4 -; X86-SSE42-NEXT: movdqa %xmm4, %xmm5 +; X86-SSE42-NEXT: movdqa %xmm0, %xmm5 ; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm5 ; X86-SSE42-NEXT: movdqa %xmm1, %xmm0 ; X86-SSE42-NEXT: pcmpgtq %xmm3, %xmm0 @@ -1260,7 +1260,7 @@ ; X64-SSE42-LABEL: test_reduce_v8i64: ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: movdqa %xmm0, %xmm4 -; X64-SSE42-NEXT: movdqa %xmm4, %xmm5 +; X64-SSE42-NEXT: movdqa %xmm0, %xmm5 ; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm5 ; X64-SSE42-NEXT: movdqa %xmm1, %xmm0 ; X64-SSE42-NEXT: pcmpgtq %xmm3, %xmm0 Index: llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll +++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll @@ -40,7 +40,7 @@ ; X86-SSE42-LABEL: test_reduce_v2i64: ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X86-SSE42-NEXT: movdqa %xmm2, %xmm0 ; X86-SSE42-NEXT: pcmpgtq %xmm1, %xmm0 ; X86-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2 @@ -81,7 +81,7 @@ ; X64-SSE42-LABEL: test_reduce_v2i64: ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X64-SSE42-NEXT: movdqa %xmm2, %xmm0 ; X64-SSE42-NEXT: pcmpgtq %xmm1, %xmm0 ; X64-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2 @@ -1065,10 +1065,10 @@ ; X86-SSE2-NEXT: subl $28, %esp ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm6 -; X86-SSE2-NEXT: movdqa %xmm6, (%esp) ## 16-byte Spill +; X86-SSE2-NEXT: movdqa %xmm2, (%esp) ## 16-byte Spill ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0] -; X86-SSE2-NEXT: movdqa %xmm2, %xmm5 +; X86-SSE2-NEXT: movdqa %xmm0, %xmm5 ; X86-SSE2-NEXT: pxor %xmm4, %xmm5 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6 ; X86-SSE2-NEXT: movdqa %xmm6, %xmm7 Index: llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll +++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll @@ -40,7 +40,7 @@ ; X86-SSE42-LABEL: test_reduce_v2i64: ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648] ; X86-SSE42-NEXT: pxor %xmm3, %xmm0 ; X86-SSE42-NEXT: pxor %xmm2, %xmm3 @@ -86,7 +86,7 @@ ; 
X64-SSE42-LABEL: test_reduce_v2i64: ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] ; X64-SSE42-NEXT: pxor %xmm3, %xmm0 ; X64-SSE42-NEXT: pxor %xmm2, %xmm3 @@ -1206,7 +1206,7 @@ ; X86-SSE2-NEXT: subl $28, %esp ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm5 -; X86-SSE2-NEXT: movdqa %xmm5, (%esp) ## 16-byte Spill +; X86-SSE2-NEXT: movdqa %xmm3, (%esp) ## 16-byte Spill ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 @@ -1224,7 +1224,7 @@ ; X86-SSE2-NEXT: por %xmm6, %xmm5 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm6 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6 -; X86-SSE2-NEXT: movdqa %xmm1, %xmm7 +; X86-SSE2-NEXT: movdqa %xmm0, %xmm7 ; X86-SSE2-NEXT: pxor %xmm4, %xmm7 ; X86-SSE2-NEXT: movdqa %xmm7, %xmm0 ; X86-SSE2-NEXT: pcmpgtd %xmm6, %xmm0 Index: llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll +++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll @@ -40,7 +40,7 @@ ; X86-SSE42-LABEL: test_reduce_v2i64: ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X86-SSE42-NEXT: movdqa {{.*#+}} xmm0 = [0,2147483648,0,2147483648] ; X86-SSE42-NEXT: movdqa %xmm1, %xmm3 ; X86-SSE42-NEXT: pxor %xmm0, %xmm3 @@ -87,7 +87,7 @@ ; X64-SSE42-LABEL: test_reduce_v2i64: ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1 -; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] ; X64-SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808] ; X64-SSE42-NEXT: movdqa %xmm1, %xmm3 ; X64-SSE42-NEXT: pxor %xmm0, %xmm3 @@ -466,7 +466,7 @@ ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648] -; X86-SSE42-NEXT: movdqa %xmm2, %xmm4 +; X86-SSE42-NEXT: movdqa %xmm0, %xmm4 ; X86-SSE42-NEXT: pxor %xmm3, %xmm4 ; X86-SSE42-NEXT: movdqa %xmm1, %xmm0 ; X86-SSE42-NEXT: pxor %xmm3, %xmm0 @@ -565,7 +565,7 @@ ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: movdqa %xmm0, %xmm2 ; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] -; X64-SSE42-NEXT: movdqa %xmm2, %xmm4 +; X64-SSE42-NEXT: movdqa %xmm0, %xmm4 ; X64-SSE42-NEXT: pxor %xmm3, %xmm4 ; X64-SSE42-NEXT: movdqa %xmm1, %xmm0 ; X64-SSE42-NEXT: pxor %xmm3, %xmm0 @@ -1106,10 +1106,10 @@ ; X86-SSE2-NEXT: subl $28, %esp ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm6 -; X86-SSE2-NEXT: movdqa %xmm6, (%esp) ## 16-byte Spill +; X86-SSE2-NEXT: movdqa %xmm2, (%esp) ## 16-byte Spill ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] -; X86-SSE2-NEXT: movdqa %xmm2, %xmm5 +; X86-SSE2-NEXT: movdqa %xmm0, %xmm5 ; X86-SSE2-NEXT: pxor %xmm4, %xmm5 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6 ; X86-SSE2-NEXT: movdqa %xmm6, %xmm7 Index: llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll +++ llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll @@ -161,6 +161,7 @@ ; CHECK-NEXT: fstpt 
(%esp) ; CHECK-NEXT: calll _ceil ; CHECK-NEXT: fld %st(0) +; CHECK-NEXT: fxch %st(1) ; CHECK-NEXT: ## InlineAsm Start ; CHECK-NEXT: fistpl %st(0) ; CHECK-NEXT: ## InlineAsm End Index: llvm/trunk/test/CodeGen/X86/ipra-local-linkage.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/ipra-local-linkage.ll +++ llvm/trunk/test/CodeGen/X86/ipra-local-linkage.ll @@ -24,7 +24,7 @@ call void @foo() ; CHECK-LABEL: bar: ; CHECK: callq foo - ; CHECK-NEXT: movl %eax, %r15d + ; CHECK-NEXT: movl %edi, %r15d call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X) ret void } Index: llvm/trunk/test/CodeGen/X86/localescape.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/localescape.ll +++ llvm/trunk/test/CodeGen/X86/localescape.ll @@ -27,7 +27,7 @@ ; X64-LABEL: print_framealloc_from_fp: ; X64: movq %rcx, %[[parent_fp:[a-z]+]] -; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx +; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx ; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]] ; X64: movq %[[str]], %rcx ; X64: callq printf Index: llvm/trunk/test/CodeGen/X86/machine-cp.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/machine-cp.ll +++ llvm/trunk/test/CodeGen/X86/machine-cp.ll @@ -8,7 +8,7 @@ ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: movl %esi, %edx ; CHECK-NEXT: movl %edi, %eax -; CHECK-NEXT: testl %edx, %edx +; CHECK-NEXT: testl %esi, %esi ; CHECK-NEXT: je LBB0_1 ; CHECK-NEXT: .p2align 4, 0x90 ; CHECK-NEXT: LBB0_2: ## %while.body @@ -59,7 +59,7 @@ ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: movq %rsi, %rdx ; CHECK-NEXT: movq %rdi, %rax -; CHECK-NEXT: testq %rdx, %rdx +; CHECK-NEXT: testq %rsi, %rsi ; CHECK-NEXT: je LBB2_1 ; CHECK-NEXT: .p2align 4, 0x90 ; CHECK-NEXT: LBB2_2: ## %while.body Index: llvm/trunk/test/CodeGen/X86/mul-i1024.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul-i1024.ll +++ llvm/trunk/test/CodeGen/X86/mul-i1024.ll @@ -38,7 +38,7 @@ ; X32-NEXT: movl %edx, %eax ; X32-NEXT: adcl %edi, %eax ; X32-NEXT: movl %edi, %ecx -; X32-NEXT: movl %ecx, -204(%ebp) # 4-byte Spill +; X32-NEXT: movl %edi, -204(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, -892(%ebp) # 4-byte Spill ; X32-NEXT: movl 12(%ebp), %eax ; X32-NEXT: movl 36(%eax), %eax @@ -47,7 +47,7 @@ ; X32-NEXT: mull %edx ; X32-NEXT: movl %edx, -236(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, %edi -; X32-NEXT: movl %edi, -304(%ebp) # 4-byte Spill +; X32-NEXT: movl %eax, -304(%ebp) # 4-byte Spill ; X32-NEXT: addl %ecx, %edi ; X32-NEXT: movl %edi, -80(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, %eax @@ -58,7 +58,7 @@ ; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: mull %ecx ; X32-NEXT: movl %edx, %ecx -; X32-NEXT: movl %ecx, -124(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, -124(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, %edx ; X32-NEXT: movl -400(%ebp), %esi # 4-byte Reload @@ -72,7 +72,7 @@ ; X32-NEXT: movl %eax, -656(%ebp) # 4-byte Spill ; X32-NEXT: leal (%ebx,%edi), %eax ; X32-NEXT: movl %edx, %edi -; X32-NEXT: leal (%ecx,%edi), %edx +; X32-NEXT: leal (%ecx,%edx), %edx ; X32-NEXT: adcl %eax, %edx ; X32-NEXT: movl %edx, -700(%ebp) # 4-byte Spill ; X32-NEXT: seto %al @@ -123,7 +123,7 @@ ; X32-NEXT: adcl %edi, %ebx ; X32-NEXT: movl %ebx, -424(%ebp) # 4-byte Spill ; X32-NEXT: movl %edi, %ebx -; X32-NEXT: movl %ebx, -256(%ebp) # 4-byte Spill +; X32-NEXT: movl %edi, -256(%ebp) # 
4-byte Spill ; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload ; X32-NEXT: addl %eax, -80(%ebp) # 4-byte Folded Spill ; X32-NEXT: movl -204(%ebp), %eax # 4-byte Reload @@ -148,7 +148,7 @@ ; X32-NEXT: movzbl %bh, %eax ; X32-NEXT: adcl %edx, %eax ; X32-NEXT: movl %eax, %edi -; X32-NEXT: movl %edi, -72(%ebp) # 4-byte Spill +; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill ; X32-NEXT: movl 12(%ebp), %eax ; X32-NEXT: movl 8(%eax), %eax ; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill @@ -220,7 +220,7 @@ ; X32-NEXT: mull %ecx ; X32-NEXT: movl %eax, -364(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, %ebx -; X32-NEXT: movl %ebx, -396(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, -396(%ebp) # 4-byte Spill ; X32-NEXT: movl -324(%ebp), %edx # 4-byte Reload ; X32-NEXT: movl %edx, %edi ; X32-NEXT: addl %eax, %edi @@ -252,7 +252,7 @@ ; X32-NEXT: mull %ebx ; X32-NEXT: movl %eax, %edi ; X32-NEXT: movl %edx, %esi -; X32-NEXT: movl %esi, -84(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, -84(%ebp) # 4-byte Spill ; X32-NEXT: movl 20(%ecx), %eax ; X32-NEXT: movl %eax, -252(%ebp) # 4-byte Spill ; X32-NEXT: mull %ebx @@ -303,7 +303,7 @@ ; X32-NEXT: movl -52(%ebp), %eax # 4-byte Reload ; X32-NEXT: adcl %edx, %eax ; X32-NEXT: movl %edx, %ebx -; X32-NEXT: movl %ebx, -56(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, -56(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, -780(%ebp) # 4-byte Spill ; X32-NEXT: movl -132(%ebp), %edx # 4-byte Reload ; X32-NEXT: movl %edx, %eax @@ -393,10 +393,10 @@ ; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: mull %ecx ; X32-NEXT: movl %eax, %ecx -; X32-NEXT: movl %ecx, -160(%ebp) # 4-byte Spill +; X32-NEXT: movl %eax, -160(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, -268(%ebp) # 4-byte Spill ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: movl %esi, %eax +; X32-NEXT: movl %ebx, %eax ; X32-NEXT: addl %ecx, %eax ; X32-NEXT: movl -264(%ebp), %ebx # 4-byte Reload ; X32-NEXT: movl %ebx, %ecx @@ -425,7 +425,7 @@ ; X32-NEXT: adcl -60(%ebp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, -592(%ebp) # 4-byte Spill ; X32-NEXT: movl %esi, %edx -; X32-NEXT: movl %edx, %eax +; X32-NEXT: movl %esi, %eax ; X32-NEXT: movl -116(%ebp), %esi # 4-byte Reload ; X32-NEXT: addl %esi, %eax ; X32-NEXT: movl %ebx, %eax @@ -533,7 +533,7 @@ ; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: mull %ecx ; X32-NEXT: movl %eax, %ebx -; X32-NEXT: movl %ebx, -336(%ebp) # 4-byte Spill +; X32-NEXT: movl %eax, -336(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, %edi ; X32-NEXT: movl 52(%esi), %eax ; X32-NEXT: movl %eax, -144(%ebp) # 4-byte Spill @@ -559,7 +559,7 @@ ; X32-NEXT: movl -336(%ebp), %ebx # 4-byte Reload ; X32-NEXT: addl %eax, %ebx ; X32-NEXT: movl %edi, %edx -; X32-NEXT: movl %edx, -176(%ebp) # 4-byte Spill +; X32-NEXT: movl %edi, -176(%ebp) # 4-byte Spill ; X32-NEXT: adcl -360(%ebp), %edi # 4-byte Folded Reload ; X32-NEXT: addl %ecx, %ebx ; X32-NEXT: movl %ebx, -472(%ebp) # 4-byte Spill @@ -590,12 +590,12 @@ ; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: mull %ecx ; X32-NEXT: movl %edx, %esi -; X32-NEXT: movl %esi, -384(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, -384(%ebp) # 4-byte Spill ; X32-NEXT: movl -116(%ebp), %edi # 4-byte Reload ; X32-NEXT: movl %edi, %ecx ; X32-NEXT: movl %eax, %edx -; X32-NEXT: movl %edx, -480(%ebp) # 4-byte Spill -; X32-NEXT: addl %edx, %ecx +; X32-NEXT: movl %eax, -480(%ebp) # 4-byte Spill +; X32-NEXT: addl %eax, %ecx ; X32-NEXT: movl -84(%ebp), %ebx # 4-byte Reload ; X32-NEXT: movl %ebx, %eax ; X32-NEXT: adcl %esi, %eax @@ -642,8 +642,8 @@ ; X32-NEXT: movl %eax, %ecx ; X32-NEXT: addl %esi, %ecx ; X32-NEXT: movl %edx, %esi 
-; X32-NEXT: movl %esi, -496(%ebp) # 4-byte Spill -; X32-NEXT: movl %esi, %ecx +; X32-NEXT: movl %edx, -496(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, %ecx ; X32-NEXT: adcl %edi, %ecx ; X32-NEXT: movl %ecx, -992(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, %ecx @@ -761,7 +761,7 @@ ; X32-NEXT: xorl %edx, %edx ; X32-NEXT: mull %edx ; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl %esi, -484(%ebp) # 4-byte Spill +; X32-NEXT: movl %eax, -484(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, -488(%ebp) # 4-byte Spill ; X32-NEXT: movl %ebx, %eax ; X32-NEXT: addl %esi, %eax @@ -793,8 +793,7 @@ ; X32-NEXT: adcl -60(%ebp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, -928(%ebp) # 4-byte Spill ; X32-NEXT: movl 8(%ebp), %ecx -; X32-NEXT: movl %ecx, %eax -; X32-NEXT: movl 84(%eax), %eax +; X32-NEXT: movl 84(%ecx), %eax ; X32-NEXT: movl %eax, -544(%ebp) # 4-byte Spill ; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: mull %ecx @@ -871,7 +870,7 @@ ; X32-NEXT: xorl %edx, %edx ; X32-NEXT: mull %edx ; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl %esi, -556(%ebp) # 4-byte Spill +; X32-NEXT: movl %eax, -556(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, -560(%ebp) # 4-byte Spill ; X32-NEXT: movl -524(%ebp), %eax # 4-byte Reload ; X32-NEXT: movl %eax, %ebx @@ -882,7 +881,7 @@ ; X32-NEXT: movl %ebx, -732(%ebp) # 4-byte Spill ; X32-NEXT: adcl %edi, %esi ; X32-NEXT: movl %esi, %edx -; X32-NEXT: movl %edx, -728(%ebp) # 4-byte Spill +; X32-NEXT: movl %esi, -728(%ebp) # 4-byte Spill ; X32-NEXT: addl -136(%ebp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, -712(%ebp) # 4-byte Spill ; X32-NEXT: movl -668(%ebp), %ecx # 4-byte Reload @@ -917,7 +916,7 @@ ; X32-NEXT: mull %ebx ; X32-NEXT: movl %eax, -564(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, %ebx -; X32-NEXT: movl %ebx, -568(%ebp) # 4-byte Spill +; X32-NEXT: movl %edx, -568(%ebp) # 4-byte Spill ; X32-NEXT: movl -500(%ebp), %edx # 4-byte Reload ; X32-NEXT: movl %edx, %edi ; X32-NEXT: addl %eax, %edi @@ -983,7 +982,7 @@ ; X32-NEXT: movzbl -88(%ebp), %eax # 1-byte Folded Reload ; X32-NEXT: adcl %edx, %eax ; X32-NEXT: movl %ecx, %edx -; X32-NEXT: addl %edx, %ebx +; X32-NEXT: addl %ecx, %ebx ; X32-NEXT: adcl %esi, %eax ; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill ; X32-NEXT: movl -28(%ebp), %edi # 4-byte Reload @@ -1038,7 +1037,7 @@ ; X32-NEXT: mull %ecx ; X32-NEXT: movl %edx, %edi ; X32-NEXT: movl %eax, %ebx -; X32-NEXT: movl %ebx, %ecx +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload ; X32-NEXT: addl %esi, %ecx ; X32-NEXT: adcl $0, %edx @@ -1052,7 +1051,7 @@ ; X32-NEXT: movzbl -16(%ebp), %ebx # 1-byte Folded Reload ; X32-NEXT: adcl %edi, %ebx ; X32-NEXT: movl %eax, %esi -; X32-NEXT: addl %esi, %edx +; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %ebx ; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload ; X32-NEXT: addl -324(%ebp), %eax # 4-byte Folded Reload @@ -1143,7 +1142,7 @@ ; X32-NEXT: movzbl %cl, %eax ; X32-NEXT: adcl %esi, %eax ; X32-NEXT: movl %edi, %esi -; X32-NEXT: addl %esi, %edx +; X32-NEXT: addl %edi, %edx ; X32-NEXT: adcl %ebx, %eax ; X32-NEXT: movl %eax, -112(%ebp) # 4-byte Spill ; X32-NEXT: movl -136(%ebp), %edi # 4-byte Reload @@ -1223,7 +1222,7 @@ ; X32-NEXT: movzbl %bl, %eax ; X32-NEXT: adcl %edx, %eax ; X32-NEXT: movl %ecx, %edx -; X32-NEXT: addl %edx, %esi +; X32-NEXT: addl %ecx, %esi ; X32-NEXT: adcl %edi, %eax ; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill ; X32-NEXT: movl -100(%ebp), %edi # 4-byte Reload @@ -1697,7 +1696,7 @@ ; X32-NEXT: movl %esi, %eax ; X32-NEXT: mull %ebx ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: movl 
%esi, -48(%ebp) # 4-byte Spill +; X32-NEXT: movl %ebx, -48(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, %ebx ; X32-NEXT: addl %ecx, %eax ; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill @@ -4479,7 +4478,7 @@ ; X32-NEXT: movl %esi, %eax ; X32-NEXT: mull %ebx ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: movl %esi, -140(%ebp) # 4-byte Spill +; X32-NEXT: movl %ebx, -140(%ebp) # 4-byte Spill ; X32-NEXT: movl %edx, %ebx ; X32-NEXT: addl %ecx, %eax ; X32-NEXT: movl %eax, -56(%ebp) # 4-byte Spill @@ -5199,7 +5198,7 @@ ; X32-NEXT: addl %edi, %edx ; X32-NEXT: movl 124(%ebx), %ebx ; X32-NEXT: movl %ecx, %eax -; X32-NEXT: imull %eax, %ebx +; X32-NEXT: imull %ecx, %ebx ; X32-NEXT: addl %edx, %ebx ; X32-NEXT: movl -144(%ebp), %ecx # 4-byte Reload ; X32-NEXT: addl %ecx, -96(%ebp) # 4-byte Folded Spill @@ -6073,8 +6072,8 @@ ; X32-NEXT: movl 108(%eax), %edx ; X32-NEXT: movl %ebx, %eax ; X32-NEXT: movl %edx, %ebx -; X32-NEXT: movl %ebx, -112(%ebp) # 4-byte Spill -; X32-NEXT: mull %ebx +; X32-NEXT: movl %edx, -112(%ebp) # 4-byte Spill +; X32-NEXT: mull %edx ; X32-NEXT: movl %edx, %esi ; X32-NEXT: addl %ecx, %eax ; X32-NEXT: movl %eax, -128(%ebp) # 4-byte Spill @@ -6113,7 +6112,7 @@ ; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload ; X32-NEXT: movl %ecx, %eax ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: mull %esi +; X32-NEXT: mull %ebx ; X32-NEXT: movl %edx, -144(%ebp) # 4-byte Spill ; X32-NEXT: movl %eax, -280(%ebp) # 4-byte Spill ; X32-NEXT: movl -60(%ebp), %ebx # 4-byte Reload @@ -6754,7 +6753,6 @@ ; X64-NEXT: adcq $0, %rbp ; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: adcq %rdi, %rbp ; X64-NEXT: setb %bl ; X64-NEXT: movzbl %bl, %ebx @@ -6764,12 +6762,12 @@ ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r11, %r12 -; X64-NEXT: movq %r11, %r8 +; X64-NEXT: movq %rcx, %r12 +; X64-NEXT: movq %rcx, %r8 ; X64-NEXT: addq %rax, %r12 ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq %rdi, %r9 -; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: addq %rbp, %r12 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill @@ -6798,7 +6796,7 @@ ; X64-NEXT: adcq %rdx, %rbx ; X64-NEXT: movq 16(%rsi), %rax ; X64-NEXT: movq %rsi, %r13 -; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill @@ -6811,7 +6809,7 @@ ; X64-NEXT: adcq %rbx, %r11 ; X64-NEXT: movq %r8, %rax ; X64-NEXT: movq %r8, %rbp -; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %rdi, %rax ; X64-NEXT: movq %r9, %rax ; X64-NEXT: adcq %rcx, %rax @@ -6824,7 +6822,7 @@ ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rdi, %rax ; X64-NEXT: movq %rdi, %r9 -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rdx, %rax ; X64-NEXT: adcq %rcx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq 32(%r13), %rax @@ -6840,9 +6838,9 @@ ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbp, %rax -; X64-NEXT: addq %r9, %rax +; X64-NEXT: addq %rdi, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte 
Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %r15, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill @@ -6860,7 +6858,7 @@ ; X64-NEXT: addq %rsi, %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %rcx, %r11 +; X64-NEXT: addq %rbx, %r11 ; X64-NEXT: adcq %rsi, %rbp ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: setb %bl @@ -6881,11 +6879,11 @@ ; X64-NEXT: adcq %rbx, %r10 ; X64-NEXT: movq %rcx, %rdx ; X64-NEXT: movq %rcx, %r12 -; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %r9, %rdx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r11, %r8 -; X64-NEXT: adcq %r8, %r15 +; X64-NEXT: adcq %r11, %r15 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rax, %r14 ; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill @@ -6981,13 +6979,12 @@ ; X64-NEXT: adcq %rdx, %r12 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r10, %rbp -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rsi, %rbx @@ -7014,7 +7011,7 @@ ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r12 ; X64-NEXT: movq %r10, %rbx -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r10, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rcx @@ -7031,7 +7028,7 @@ ; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rcx, %rbx -; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %rbp, %r8 @@ -7062,7 +7059,7 @@ ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r11, %rsi -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %r13 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload @@ -7142,13 +7139,12 @@ ; X64-NEXT: adcq %rdx, %r10 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r11, %rbp -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rdi, %rbx @@ -7278,7 +7274,7 @@ ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %r14 ; X64-NEXT: movq %r8, %rbp -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %r8, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbx @@ -7338,7 +7334,7 @@ ; X64-NEXT: adcq $0, %r9 ; X64-NEXT: adcq $0, %r10 ; X64-NEXT: movq %rbp, %rsi -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rbp, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r14 @@ -7395,8 +7391,8 @@ ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: movq %r8, %rdi -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %rdi +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill 
+; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %rbx, %r8 @@ -7479,13 +7475,12 @@ ; X64-NEXT: movq %rcx, %r14 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r10, %rdi -; X64-NEXT: mulq %rdi +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rdi +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %r11, %rbx @@ -7513,8 +7508,7 @@ ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq $0, %r14 ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r13, %rbx -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r13, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r8 @@ -7527,7 +7521,7 @@ ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: addq %r8, %rcx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r13, %rax ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload ; X64-NEXT: mulq %r13 ; X64-NEXT: movq %rdx, %rbx @@ -7561,13 +7555,12 @@ ; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %r10, %rsi -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload ; X64-NEXT: movq %r8, %rax -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rdi ; X64-NEXT: addq %rcx, %rdi @@ -7643,7 +7636,7 @@ ; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rcx, %r10 -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %rdi ; X64-NEXT: addq %rsi, %rdi @@ -7655,16 +7648,16 @@ ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload -; X64-NEXT: addq %rbx, %r12 +; X64-NEXT: addq %rax, %r12 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload -; X64-NEXT: adcq %r14, %r15 +; X64-NEXT: adcq %rdx, %r15 ; X64-NEXT: addq %rdi, %r12 ; X64-NEXT: adcq %rcx, %r15 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r11, %rsi -; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %rsi +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload @@ -7728,7 +7721,7 @@ ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: addq %r9, %rbp +; X64-NEXT: addq %rax, %rbp ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: addq %rsi, %rbp @@ -7906,7 +7899,7 @@ ; X64-NEXT: movq 88(%rsi), %rax ; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: movq %rax, %rsi -; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbp @@ -7942,13 +7935,12 @@ ; X64-NEXT: adcq %r8, %r10 ; X64-NEXT: addq %rbx, %rsi ; 
X64-NEXT: adcq %rbp, %r10 -; X64-NEXT: movq %r9, %rdi -; X64-NEXT: movq 64(%rdi), %r13 +; X64-NEXT: movq 64(%r9), %r13 ; X64-NEXT: movq %r13, %rax ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq 72(%rdi), %r9 +; X64-NEXT: movq 72(%r9), %r9 ; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp @@ -7976,8 +7968,8 @@ ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %r15 ; X64-NEXT: movq %r12, %rcx -; X64-NEXT: addq %r15, %rcx -; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: adcq %rdx, %r8 ; X64-NEXT: addq %rbp, %rcx ; X64-NEXT: adcq %rbx, %r8 ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload @@ -8029,14 +8021,13 @@ ; X64-NEXT: setb %r10b ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %r8, %rdi -; X64-NEXT: mulq %rdi +; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r12 +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %r8, %r12 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rcx, %rbx @@ -8075,7 +8066,7 @@ ; X64-NEXT: imulq %rcx, %rdi ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r12, %rsi -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %r12 ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rdi, %rdx ; X64-NEXT: movq 104(%rbp), %r8 Index: llvm/trunk/test/CodeGen/X86/mul-i256.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul-i256.ll +++ llvm/trunk/test/CodeGen/X86/mul-i256.ll @@ -44,7 +44,7 @@ ; X32-NEXT: movl %edi, %eax ; X32-NEXT: mull %ecx ; X32-NEXT: movl %ecx, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %edx, %ecx ; X32-NEXT: addl %ebx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill @@ -62,9 +62,9 @@ ; X32-NEXT: movl %ecx, %eax ; X32-NEXT: mull %edx ; X32-NEXT: movl %edx, %ebp -; X32-NEXT: movl %ebp, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: xorl %edx, %edx ; X32-NEXT: mull %edx @@ -127,7 +127,7 @@ ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, %ecx -; X32-NEXT: movl 8(%ecx), %ebx +; X32-NEXT: movl 8(%eax), %ebx ; X32-NEXT: movl %esi, %eax ; X32-NEXT: movl %esi, %edi ; X32-NEXT: mull %ebx @@ -156,7 +156,7 @@ ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload ; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl %ebx, %edi -; X32-NEXT: movl %edi, %eax +; X32-NEXT: movl %ebx, %eax ; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: mull %ecx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill Index: llvm/trunk/test/CodeGen/X86/mul-i512.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul-i512.ll +++ llvm/trunk/test/CodeGen/X86/mul-i512.ll @@ -31,7 +31,7 @@ ; X32-NEXT: movl %edi, (%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %ebx ; X32-NEXT: movl %ecx, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: setb %cl ; X32-NEXT: addl %eax, %ebx ; X32-NEXT: movzbl 
%cl, %ecx @@ -55,7 +55,7 @@ ; X32-NEXT: mull %ebx ; X32-NEXT: movl %eax, %ebp ; X32-NEXT: movl %edx, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 4(%ecx), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %ecx, %esi @@ -92,14 +92,13 @@ ; X32-NEXT: adcl %edi, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, %eax -; X32-NEXT: movl (%eax), %eax +; X32-NEXT: movl (%ecx), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: xorl %ebp, %ebp ; X32-NEXT: mull %ebp ; X32-NEXT: movl %edx, %ebx ; X32-NEXT: movl %eax, %ecx -; X32-NEXT: movl %ecx, %edx +; X32-NEXT: movl %eax, %edx ; X32-NEXT: addl %esi, %edx ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %ebx, %eax @@ -113,7 +112,7 @@ ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %ecx, %edi ; X32-NEXT: movl %ecx, %ebp -; X32-NEXT: movl %ebp, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: addl %eax, %edi ; X32-NEXT: movl %ebx, %eax ; X32-NEXT: adcl %edx, %eax @@ -143,7 +142,7 @@ ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ebx, %ecx ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: setb %bl ; X32-NEXT: addl %eax, %ecx ; X32-NEXT: movzbl %bl, %ebx @@ -278,7 +277,7 @@ ; X32-NEXT: adcl %ebx, %ecx ; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl %edi, %ebp -; X32-NEXT: movl %ebp, %eax +; X32-NEXT: movl %edi, %eax ; X32-NEXT: mull %esi ; X32-NEXT: movl %edx, %edi ; X32-NEXT: movl %eax, %ebx @@ -433,7 +432,7 @@ ; X32-NEXT: adcl %edi, %ecx ; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl %ebx, %edi -; X32-NEXT: movl %edi, %eax +; X32-NEXT: movl %ebx, %eax ; X32-NEXT: mull %esi ; X32-NEXT: movl %eax, %ebp ; X32-NEXT: addl %ecx, %ebp @@ -899,7 +898,7 @@ ; X32-NEXT: movl %ecx, %eax ; X32-NEXT: mull %esi ; X32-NEXT: movl %esi, %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %edx, %esi ; X32-NEXT: addl %ebx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill @@ -929,7 +928,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: movl %ecx, %eax ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: mull %esi +; X32-NEXT: mull %ebx ; X32-NEXT: movl %edx, %edi ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload @@ -1077,7 +1076,7 @@ ; X32-NEXT: addl %esi, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: movl %edi, %eax -; X32-NEXT: imull %eax, %esi +; X32-NEXT: imull %edi, %esi ; X32-NEXT: addl %edx, %esi ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -1177,7 +1176,7 @@ ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, %ecx -; X32-NEXT: movl 40(%ecx), %ebx +; X32-NEXT: movl 40(%esi), %ebx ; X32-NEXT: movl %ebx, %eax ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload @@ -1374,7 +1373,7 @@ ; X32-NEXT: addl %edi, %edx ; X32-NEXT: movl 60(%ebx), %ebx ; X32-NEXT: movl %ecx, %eax -; X32-NEXT: imull %eax, %ebx +; 
X32-NEXT: imull %ecx, %ebx ; X32-NEXT: addl %edx, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: addl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill @@ -1546,7 +1545,7 @@ ; X64-NEXT: movq 8(%rsi), %rbp ; X64-NEXT: movq %r15, %rax ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %r11, %rax @@ -1569,15 +1568,15 @@ ; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rbp, %r14 -; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq %rbx, %rsi ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: movq %r10, %rbx -; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r10, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rax, %r10 @@ -1585,7 +1584,7 @@ ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %r10, %r15 ; X64-NEXT: adcq %r13, %rdx ; X64-NEXT: addq %rbp, %r15 @@ -1624,8 +1623,8 @@ ; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %r11, %r10 -; X64-NEXT: adcq %r14, %r13 +; X64-NEXT: addq %rax, %r10 +; X64-NEXT: adcq %rdx, %r13 ; X64-NEXT: addq %rbp, %r10 ; X64-NEXT: adcq %rsi, %r13 ; X64-NEXT: addq %r8, %r10 @@ -1637,7 +1636,7 @@ ; X64-NEXT: movq 16(%rsi), %r8 ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %r12 @@ -1668,7 +1667,7 @@ ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rbp, %r11 +; X64-NEXT: addq %rax, %r11 ; X64-NEXT: adcq %rdx, %r14 ; X64-NEXT: addq %r9, %r11 ; X64-NEXT: adcq %rbx, %r14 Index: llvm/trunk/test/CodeGen/X86/mul128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul128.ll +++ llvm/trunk/test/CodeGen/X86/mul128.ll @@ -8,7 +8,7 @@ ; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: imulq %rdi, %rcx ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: mulq %rdx ; X64-NEXT: addq %rcx, %rdx ; X64-NEXT: imulq %r8, %rsi ; X64-NEXT: addq %rsi, %rdx Index: llvm/trunk/test/CodeGen/X86/mulvi32.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mulvi32.ll +++ llvm/trunk/test/CodeGen/X86/mulvi32.ll @@ -234,7 +234,7 @@ ; SSE-LABEL: _mul4xi32toi64b: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3] +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; SSE-NEXT: pmuludq %xmm1, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; SSE-NEXT: pmuludq %xmm0, %xmm1 Index: llvm/trunk/test/CodeGen/X86/pmul.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pmul.ll +++ llvm/trunk/test/CodeGen/X86/pmul.ll @@ -9,7 +9,7 @@ ; SSE2-LABEL: mul_v16i8c: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = 
xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm2, %xmm1 @@ -143,10 +143,10 @@ ; SSE2-LABEL: mul_v16i8: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm3 ; SSE2-NEXT: pmullw %xmm2, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] @@ -386,7 +386,7 @@ ; SSE2-LABEL: mul_v32i8c: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm3, %xmm2 @@ -398,7 +398,7 @@ ; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: packuswb %xmm2, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm4, %xmm2 @@ -567,10 +567,10 @@ ; SSE2-LABEL: mul_v32i8: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] @@ -583,10 +583,10 @@ ; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: packuswb %xmm5, %xmm0 ; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = 
xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm2, %xmm5 ; SSE2-NEXT: pand %xmm4, %xmm5 @@ -774,7 +774,7 @@ ; SSE2-LABEL: mul_v64i8c: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm4, %xmm6 @@ -786,7 +786,7 @@ ; SSE2-NEXT: pand %xmm5, %xmm0 ; SSE2-NEXT: packuswb %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 @@ -796,7 +796,7 @@ ; SSE2-NEXT: pand %xmm5, %xmm1 ; SSE2-NEXT: packuswb %xmm6, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 @@ -806,7 +806,7 @@ ; SSE2-NEXT: pand %xmm5, %xmm2 ; SSE2-NEXT: packuswb %xmm6, %xmm2 ; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 @@ -821,7 +821,7 @@ ; SSE41: # %bb.0: # %entry ; SSE41-NEXT: movdqa %xmm1, %xmm4 ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 +; SSE41-NEXT: pmovsxbw %xmm0, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117] ; SSE41-NEXT: pmullw %xmm6, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] @@ -939,10 +939,10 @@ ; SSE2-LABEL: mul_v64i8: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm4, %xmm8 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15] ; SSE2-NEXT: psraw $8, %xmm8 ; SSE2-NEXT: movdqa %xmm0, %xmm9 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm9 ; SSE2-NEXT: pmullw %xmm8, %xmm9 ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] @@ -955,10 +955,10 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: 
packuswb %xmm9, %xmm0 ; SSE2-NEXT: movdqa %xmm5, %xmm9 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15] ; SSE2-NEXT: psraw $8, %xmm9 ; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: pmullw %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm4 @@ -970,10 +970,10 @@ ; SSE2-NEXT: pand %xmm8, %xmm1 ; SSE2-NEXT: packuswb %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm6, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5 @@ -985,10 +985,10 @@ ; SSE2-NEXT: pand %xmm8, %xmm2 ; SSE2-NEXT: packuswb %xmm5, %xmm2 ; SSE2-NEXT: movdqa %xmm7, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm3, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5 @@ -1006,7 +1006,7 @@ ; SSE41-NEXT: movdqa %xmm1, %xmm8 ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pmovsxbw %xmm4, %xmm9 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 +; SSE41-NEXT: pmovsxbw %xmm0, %xmm0 ; SSE41-NEXT: pmullw %xmm9, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255] ; SSE41-NEXT: pand %xmm9, %xmm0 @@ -1387,7 +1387,7 @@ ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7] ; SSE2-NEXT: movdqa %xmm9, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: psrad $16, %xmm9 Index: llvm/trunk/test/CodeGen/X86/powi.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/powi.ll +++ llvm/trunk/test/CodeGen/X86/powi.ll @@ -5,7 +5,7 @@ ; CHECK-LABEL: pow_wrapper: ; CHECK: # %bb.0: ; CHECK-NEXT: movapd %xmm0, %xmm1 -; CHECK-NEXT: mulsd %xmm1, %xmm1 +; CHECK-NEXT: mulsd %xmm0, %xmm1 ; CHECK-NEXT: mulsd %xmm1, %xmm0 ; CHECK-NEXT: mulsd %xmm1, %xmm1 ; 
CHECK-NEXT: mulsd %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/pr11334.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pr11334.ll +++ llvm/trunk/test/CodeGen/X86/pr11334.ll @@ -25,7 +25,7 @@ ; SSE-NEXT: cvtps2pd %xmm0, %xmm0 ; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm2, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1] ; SSE-NEXT: fldl -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/pr29112.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pr29112.ll +++ llvm/trunk/test/CodeGen/X86/pr29112.ll @@ -49,13 +49,13 @@ ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0] ; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2 ; CHECK-NEXT: vmovaps %xmm15, %xmm1 -; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill -; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9 +; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill +; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9 ; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0 -; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8 +; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8 ; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3 ; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0 ; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp) ; CHECK-NEXT: vmovaps %xmm9, (%rsp) ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload Index: llvm/trunk/test/CodeGen/X86/pr34080-2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pr34080-2.ll +++ llvm/trunk/test/CodeGen/X86/pr34080-2.ll @@ -23,7 +23,7 @@ ; CHECK-NEXT: movl %esi, %eax ; CHECK-NEXT: imull %ecx ; CHECK-NEXT: movl %edx, %ecx -; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: movl %edx, %eax ; CHECK-NEXT: shrl $31, %eax ; CHECK-NEXT: sarl $5, %ecx ; CHECK-NEXT: addl %eax, %ecx @@ -31,7 +31,7 @@ ; CHECK-NEXT: movl %esi, %eax ; CHECK-NEXT: imull %edx ; CHECK-NEXT: movl %edx, %edi -; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: movl %edx, %eax ; CHECK-NEXT: shrl $31, %eax ; CHECK-NEXT: sarl $7, %edi ; CHECK-NEXT: addl %eax, %edi Index: llvm/trunk/test/CodeGen/X86/psubus.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/psubus.ll +++ llvm/trunk/test/CodeGen/X86/psubus.ll @@ -683,7 +683,7 @@ ; SSE41-LABEL: test14: ; SSE41: # %bb.0: # %vector.ph ; SSE41-NEXT: movdqa %xmm0, %xmm5 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero ; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1] @@ -1247,7 +1247,7 @@ ; SSE2-NEXT: movdqa %xmm1, %xmm9 ; SSE2-NEXT: movdqa %xmm0, %xmm8 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: movdqa %xmm8, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm3, %xmm1 ; SSE2-NEXT: movdqa %xmm4, %xmm0 ; SSE2-NEXT: pxor %xmm3, %xmm0 @@ -1295,7 +1295,7 @@ ; SSSE3-NEXT: movdqa %xmm1, %xmm9 ; SSSE3-NEXT: movdqa %xmm0, %xmm8 ; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSSE3-NEXT: movdqa %xmm8, %xmm1 +; SSSE3-NEXT: 
movdqa %xmm0, %xmm1 ; SSSE3-NEXT: pxor %xmm3, %xmm1 ; SSSE3-NEXT: movdqa %xmm4, %xmm0 ; SSSE3-NEXT: pxor %xmm3, %xmm0 @@ -1950,7 +1950,7 @@ ; SSE2-NEXT: movdqa %xmm9, %xmm11 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3] -; SSE2-NEXT: movdqa %xmm8, %xmm10 +; SSE2-NEXT: movdqa %xmm1, %xmm10 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3] ; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648] @@ -2013,7 +2013,7 @@ ; SSSE3-NEXT: movdqa %xmm9, %xmm11 ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3] -; SSSE3-NEXT: movdqa %xmm8, %xmm10 +; SSSE3-NEXT: movdqa %xmm1, %xmm10 ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3] ; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648] Index: llvm/trunk/test/CodeGen/X86/retpoline-external.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/retpoline-external.ll +++ llvm/trunk/test/CodeGen/X86/retpoline-external.ll @@ -19,7 +19,7 @@ ; X64-LABEL: icall_reg: ; X64-DAG: movq %rdi, %[[fp:[^ ]*]] ; X64-DAG: movl %esi, %[[x:[^ ]*]] -; X64: movl %[[x]], %edi +; X64: movl %esi, %edi ; X64: callq bar ; X64-DAG: movl %[[x]], %edi ; X64-DAG: movq %[[fp]], %r11 @@ -111,7 +111,7 @@ ; X64-LABEL: vcall: ; X64: movq %rdi, %[[obj:[^ ]*]] -; X64: movq (%[[obj]]), %[[vptr:[^ ]*]] +; X64: movq (%rdi), %[[vptr:[^ ]*]] ; X64: movq 8(%[[vptr]]), %[[fp:[^ ]*]] ; X64: movq %[[fp]], %r11 ; X64: callq __llvm_external_retpoline_r11 Index: llvm/trunk/test/CodeGen/X86/retpoline.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/retpoline.ll +++ llvm/trunk/test/CodeGen/X86/retpoline.ll @@ -19,7 +19,7 @@ ; X64-LABEL: icall_reg: ; X64-DAG: movq %rdi, %[[fp:[^ ]*]] ; X64-DAG: movl %esi, %[[x:[^ ]*]] -; X64: movl %[[x]], %edi +; X64: movl %esi, %edi ; X64: callq bar ; X64-DAG: movl %[[x]], %edi ; X64-DAG: movq %[[fp]], %r11 @@ -111,7 +111,7 @@ ; X64-LABEL: vcall: ; X64: movq %rdi, %[[obj:[^ ]*]] -; X64: movq (%[[obj]]), %[[vptr:[^ ]*]] +; X64: movq (%rdi), %[[vptr:[^ ]*]] ; X64: movq 8(%[[vptr]]), %[[fp:[^ ]*]] ; X64: movq %[[fp]], %r11 ; X64: callq __llvm_retpoline_r11 Index: llvm/trunk/test/CodeGen/X86/sad.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sad.ll +++ llvm/trunk/test/CodeGen/X86/sad.ll @@ -653,7 +653,7 @@ ; SSE2-NEXT: paddd %xmm7, %xmm0 ; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm13, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm13, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: paddd %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm0, %xmm1 Index: llvm/trunk/test/CodeGen/X86/safestack.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/safestack.ll +++ llvm/trunk/test/CodeGen/X86/safestack.ll @@ -40,6 +40,6 @@ ; LINUX-I386-PA: 
calll __safestack_pointer_address ; LINUX-I386-PA: movl %eax, %[[A:.*]] -; LINUX-I386-PA: movl (%[[A]]), %[[B:.*]] +; LINUX-I386-PA: movl (%eax), %[[B:.*]] ; LINUX-I386-PA: leal -16(%[[B]]), %[[C:.*]] ; LINUX-I386-PA: movl %[[C]], (%[[A]]) Index: llvm/trunk/test/CodeGen/X86/safestack_inline.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/safestack_inline.ll +++ llvm/trunk/test/CodeGen/X86/safestack_inline.ll @@ -25,6 +25,6 @@ ; CALL: callq __safestack_pointer_address ; CALL: movq %rax, %[[A:.*]] -; CALL: movq (%[[A]]), %[[B:.*]] +; CALL: movq (%rax), %[[B:.*]] ; CALL: leaq -16(%[[B]]), %[[C:.*]] ; CALL: movq %[[C]], (%[[A]]) Index: llvm/trunk/test/CodeGen/X86/scalar_widen_div.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/scalar_widen_div.ll +++ llvm/trunk/test/CodeGen/X86/scalar_widen_div.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: movq %rdx, %r8 ; CHECK-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %r8, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rcx ; CHECK-NEXT: pmovsxdq (%rdi,%rcx,8), %xmm0 ; CHECK-NEXT: pmovsxdq (%rsi,%rcx,8), %xmm1 @@ -403,7 +403,7 @@ ; CHECK-LABEL: test_int_div: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movl %edx, %r9d -; CHECK-NEXT: testl %r9d, %r9d +; CHECK-NEXT: testl %edx, %edx ; CHECK-NEXT: jle .LBB12_3 ; CHECK-NEXT: # %bb.1: # %bb.nph ; CHECK-NEXT: xorl %ecx, %ecx Index: llvm/trunk/test/CodeGen/X86/select.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/select.ll +++ llvm/trunk/test/CodeGen/X86/select.ll @@ -22,8 +22,7 @@ ; MCU-NEXT: jne .LBB0_1 ; MCU-NEXT: # %bb.2: ; MCU-NEXT: addl $8, %edx -; MCU-NEXT: movl %edx, %eax -; MCU-NEXT: movl (%eax), %eax +; MCU-NEXT: movl (%edx), %eax ; MCU-NEXT: retl ; MCU-NEXT: .LBB0_1: ; MCU-NEXT: addl $8, %eax Index: llvm/trunk/test/CodeGen/X86/shrink-wrap-chkstk.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/shrink-wrap-chkstk.ll +++ llvm/trunk/test/CodeGen/X86/shrink-wrap-chkstk.ll @@ -61,7 +61,7 @@ ; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue ; CHECK: movl %ecx, %eax -; CHECK: cmpl %edx, %eax +; CHECK: cmpl %edx, %ecx ; CHECK: jge LBB1_2 ; CHECK: pushl %eax ; CHECK: movl $4092, %eax Index: llvm/trunk/test/CodeGen/X86/slow-pmulld.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/slow-pmulld.ll +++ llvm/trunk/test/CodeGen/X86/slow-pmulld.ll @@ -614,7 +614,7 @@ ; SLOW32-NEXT: movdqa %xmm1, %xmm3 ; SLOW32-NEXT: movdqa %xmm0, %xmm1 ; SLOW32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778,18778,18778,18778,18778] -; SLOW32-NEXT: movdqa %xmm1, %xmm4 +; SLOW32-NEXT: movdqa %xmm0, %xmm4 ; SLOW32-NEXT: pmulhuw %xmm2, %xmm4 ; SLOW32-NEXT: pmullw %xmm2, %xmm1 ; SLOW32-NEXT: movdqa %xmm1, %xmm0 @@ -633,7 +633,7 @@ ; SLOW64-NEXT: movdqa %xmm1, %xmm3 ; SLOW64-NEXT: movdqa %xmm0, %xmm1 ; SLOW64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778,18778,18778,18778,18778] -; SLOW64-NEXT: movdqa %xmm1, %xmm4 +; SLOW64-NEXT: movdqa %xmm0, %xmm4 ; SLOW64-NEXT: pmulhuw %xmm2, %xmm4 ; SLOW64-NEXT: pmullw %xmm2, %xmm1 ; SLOW64-NEXT: movdqa %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll +++ 
llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll @@ -201,7 +201,7 @@ ; SSE: # %bb.0: ; SSE-NEXT: rsqrtss %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: mulss %xmm2, %xmm2 +; SSE-NEXT: mulss %xmm1, %xmm2 ; SSE-NEXT: mulss %xmm0, %xmm2 ; SSE-NEXT: addss {{.*}}(%rip), %xmm2 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1 @@ -247,7 +247,7 @@ ; SSE: # %bb.0: ; SSE-NEXT: rsqrtps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: mulps %xmm2, %xmm2 +; SSE-NEXT: mulps %xmm1, %xmm2 ; SSE-NEXT: mulps %xmm0, %xmm2 ; SSE-NEXT: addps {{.*}}(%rip), %xmm2 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1 @@ -297,7 +297,7 @@ ; SSE-NEXT: rsqrtps %xmm0, %xmm3 ; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01] ; SSE-NEXT: movaps %xmm3, %xmm2 -; SSE-NEXT: mulps %xmm2, %xmm2 +; SSE-NEXT: mulps %xmm3, %xmm2 ; SSE-NEXT: mulps %xmm0, %xmm2 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00] ; SSE-NEXT: addps %xmm0, %xmm2 @@ -305,7 +305,7 @@ ; SSE-NEXT: mulps %xmm3, %xmm2 ; SSE-NEXT: rsqrtps %xmm1, %xmm5 ; SSE-NEXT: movaps %xmm5, %xmm3 -; SSE-NEXT: mulps %xmm3, %xmm3 +; SSE-NEXT: mulps %xmm5, %xmm3 ; SSE-NEXT: mulps %xmm1, %xmm3 ; SSE-NEXT: addps %xmm0, %xmm3 ; SSE-NEXT: mulps %xmm4, %xmm3 Index: llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll +++ llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll @@ -1084,8 +1084,7 @@ ; SSE2-NEXT: testb $1, %dil ; SSE2-NEXT: jne .LBB62_1 ; SSE2-NEXT: # %bb.2: -; SSE2-NEXT: movaps %xmm2, %xmm1 -; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] ; SSE2-NEXT: retq ; SSE2-NEXT: .LBB62_1: ; SSE2-NEXT: addss %xmm0, %xmm1 @@ -1097,8 +1096,7 @@ ; SSE41-NEXT: testb $1, %dil ; SSE41-NEXT: jne .LBB62_1 ; SSE41-NEXT: # %bb.2: -; SSE41-NEXT: movaps %xmm2, %xmm1 -; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] ; SSE41-NEXT: retq ; SSE41-NEXT: .LBB62_1: ; SSE41-NEXT: addss %xmm0, %xmm1 @@ -1139,8 +1137,7 @@ ; SSE2-NEXT: testb $1, %dil ; SSE2-NEXT: jne .LBB63_1 ; SSE2-NEXT: # %bb.2: -; SSE2-NEXT: movapd %xmm2, %xmm1 -; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] ; SSE2-NEXT: retq ; SSE2-NEXT: .LBB63_1: ; SSE2-NEXT: addsd %xmm0, %xmm1 @@ -1152,8 +1149,7 @@ ; SSE41-NEXT: testb $1, %dil ; SSE41-NEXT: jne .LBB63_1 ; SSE41-NEXT: # %bb.2: -; SSE41-NEXT: movaps %xmm2, %xmm1 -; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3] ; SSE41-NEXT: retq ; SSE41-NEXT: .LBB63_1: ; SSE41-NEXT: addsd %xmm0, %xmm1 Index: llvm/trunk/test/CodeGen/X86/sse1.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse1.ll +++ llvm/trunk/test/CodeGen/X86/sse1.ll @@ -16,7 +16,7 @@ ; X32-LABEL: test4: ; X32: # %bb.0: # %entry ; X32-NEXT: movaps %xmm0, %xmm2 -; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3] +; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3] ; X32-NEXT: addss %xmm1, %xmm0 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; X32-NEXT: subss %xmm1, %xmm2 @@ -26,7 +26,7 @@ ; X64-LABEL: test4: ; X64: # %bb.0: # %entry ; X64-NEXT: movaps %xmm0, %xmm2 -; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3] +; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3] ; X64-NEXT: addss %xmm1, %xmm0 ; X64-NEXT: shufps {{.*#+}} xmm1 = 
xmm1[1,1,2,3] ; X64-NEXT: subss %xmm1, %xmm2 Index: llvm/trunk/test/CodeGen/X86/sse3-avx-addsub-2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse3-avx-addsub-2.ll +++ llvm/trunk/test/CodeGen/X86/sse3-avx-addsub-2.ll @@ -406,9 +406,9 @@ ; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: subss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1] ; SSE-NEXT: movaps %xmm1, %xmm4 -; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1] ; SSE-NEXT: subss %xmm4, %xmm3 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] ; SSE-NEXT: addss %xmm0, %xmm4 Index: llvm/trunk/test/CodeGen/X86/statepoint-live-in.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/statepoint-live-in.ll +++ llvm/trunk/test/CodeGen/X86/statepoint-live-in.ll @@ -114,7 +114,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset %rbx, -16 ; CHECK-NEXT: movl %edi, %ebx -; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp) +; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq _baz ; CHECK-NEXT: Ltmp6: ; CHECK-NEXT: callq _bar Index: llvm/trunk/test/CodeGen/X86/statepoint-stack-usage.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/statepoint-stack-usage.ll +++ llvm/trunk/test/CodeGen/X86/statepoint-stack-usage.ll @@ -61,9 +61,9 @@ gc "statepoint-example" { ; CHECK-LABEL: back_to_back_deopt ; The exact stores don't matter, but there need to be three stack slots created -; CHECK-DAG: movl %ebx, 12(%rsp) -; CHECK-DAG: movl %ebp, 8(%rsp) -; CHECK-DAG: movl %r14d, 4(%rsp) +; CHECK-DAG: movl %edi, 12(%rsp) +; CHECK-DAG: movl %esi, 8(%rsp) +; CHECK-DAG: movl %edx, 4(%rsp) ; CHECK: callq ; CHECK-DAG: movl %ebx, 12(%rsp) ; CHECK-DAG: movl %ebp, 8(%rsp) Index: llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll +++ llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll @@ -1016,12 +1016,12 @@ ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] @@ -1124,12 +1124,12 @@ ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] @@ -1314,11 +1314,11 @@ ; SSE-LABEL: fptoui_4f32_to_4i32: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] ; SSE-NEXT: 
cvttss2si %xmm1, %rax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: cvttss2si %xmm2, %rax ; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] @@ -1556,7 +1556,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: movd %eax, %xmm0 ; SSE-NEXT: movaps %xmm2, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1] ; SSE-NEXT: cvttss2si %xmm3, %rax ; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] @@ -1568,11 +1568,11 @@ ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3] ; SSE-NEXT: cvttss2si %xmm2, %rax ; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1] ; SSE-NEXT: cvttss2si %xmm3, %rax ; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] @@ -1683,7 +1683,7 @@ ; SSE-NEXT: cmovaeq %rcx, %rdx ; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx @@ -1694,7 +1694,7 @@ ; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx @@ -1861,7 +1861,7 @@ ; SSE-NEXT: cmovaeq %rcx, %rdx ; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx @@ -1872,7 +1872,7 @@ ; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx Index: llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll +++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll @@ -1591,7 +1591,7 @@ ; SSE-LABEL: uitofp_2i64_to_4f32: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: movq %xmm1, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB39_1 ; SSE-NEXT: # %bb.2: @@ -1819,7 +1819,7 @@ ; SSE-LABEL: uitofp_4i64_to_4f32_undef: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: movq %xmm1, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB41_1 ; SSE-NEXT: # %bb.2: Index: llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll +++ llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll @@ -454,7 +454,7 @@ ; SSE42: # 
%bb.0: ; SSE42-NEXT: movdqa %xmm0, %xmm2 ; SSE42-NEXT: movdqa %xmm1, %xmm3 -; SSE42-NEXT: pcmpgtq %xmm2, %xmm3 +; SSE42-NEXT: pcmpgtq %xmm0, %xmm3 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE42-NEXT: pxor %xmm3, %xmm0 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1 Index: llvm/trunk/test/CodeGen/X86/vec_shift4.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_shift4.ll +++ llvm/trunk/test/CodeGen/X86/vec_shift4.ll @@ -35,7 +35,7 @@ ; X32: # %bb.0: # %entry ; X32-NEXT: movdqa %xmm0, %xmm2 ; X32-NEXT: psllw $5, %xmm1 -; X32-NEXT: movdqa %xmm2, %xmm3 +; X32-NEXT: movdqa %xmm0, %xmm3 ; X32-NEXT: psllw $4, %xmm3 ; X32-NEXT: pand {{\.LCPI.*}}, %xmm3 ; X32-NEXT: movdqa %xmm1, %xmm0 @@ -47,7 +47,7 @@ ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X32-NEXT: movdqa %xmm2, %xmm3 -; X32-NEXT: paddb %xmm3, %xmm3 +; X32-NEXT: paddb %xmm2, %xmm3 ; X32-NEXT: paddb %xmm1, %xmm1 ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2 @@ -58,7 +58,7 @@ ; X64: # %bb.0: # %entry ; X64-NEXT: movdqa %xmm0, %xmm2 ; X64-NEXT: psllw $5, %xmm1 -; X64-NEXT: movdqa %xmm2, %xmm3 +; X64-NEXT: movdqa %xmm0, %xmm3 ; X64-NEXT: psllw $4, %xmm3 ; X64-NEXT: pand {{.*}}(%rip), %xmm3 ; X64-NEXT: movdqa %xmm1, %xmm0 @@ -70,7 +70,7 @@ ; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X64-NEXT: movdqa %xmm2, %xmm3 -; X64-NEXT: paddb %xmm3, %xmm3 +; X64-NEXT: paddb %xmm2, %xmm3 ; X64-NEXT: paddb %xmm1, %xmm1 ; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2 Index: llvm/trunk/test/CodeGen/X86/vector-blend.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-blend.ll +++ llvm/trunk/test/CodeGen/X86/vector-blend.ll @@ -955,7 +955,7 @@ ; SSE41: # %bb.0: # %entry ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: pxor %xmm3, %xmm3 -; SSE41-NEXT: psubd %xmm2, %xmm3 +; SSE41-NEXT: psubd %xmm0, %xmm3 ; SSE41-NEXT: movaps %xmm1, %xmm0 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3 ; SSE41-NEXT: movaps %xmm3, %xmm0 Index: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -177,13 +177,13 @@ ; SSE2-LABEL: test_div7_16i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: pmullw %xmm3, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1 @@ -501,13 +501,13 @@ ; SSE2-LABEL: test_rem7_16i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = 
xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: pmullw %xmm3, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1 @@ -523,7 +523,7 @@ ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: paddb %xmm2, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] ; SSE2-NEXT: pmullw %xmm3, %xmm2 Index: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -497,7 +497,7 @@ ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] ; SSE2-NEXT: pmullw %xmm3, %xmm2 Index: llvm/trunk/test/CodeGen/X86/vector-mul.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-mul.ll +++ llvm/trunk/test/CodeGen/X86/vector-mul.ll @@ -178,7 +178,7 @@ ; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8: ; X86: # %bb.0: ; X86-NEXT: movdqa %xmm0, %xmm1 -; X86-NEXT: movdqa %xmm1, %xmm2 +; X86-NEXT: movdqa %xmm0, %xmm2 ; X86-NEXT: psllw $4, %xmm2 ; X86-NEXT: pand {{\.LCPI.*}}, %xmm2 ; X86-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640] @@ -189,7 +189,7 @@ ; X86-NEXT: paddb %xmm0, %xmm0 ; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; X86-NEXT: movdqa %xmm1, %xmm2 -; X86-NEXT: paddb %xmm2, %xmm2 +; X86-NEXT: paddb %xmm1, %xmm2 ; X86-NEXT: paddb %xmm0, %xmm0 ; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; X86-NEXT: movdqa %xmm1, %xmm0 @@ -198,7 +198,7 @@ ; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8: ; X64: # %bb.0: ; X64-NEXT: movdqa %xmm0, %xmm1 -; X64-NEXT: movdqa %xmm1, %xmm2 +; X64-NEXT: movdqa %xmm0, %xmm2 ; X64-NEXT: psllw $4, %xmm2 ; X64-NEXT: pand {{.*}}(%rip), %xmm2 ; X64-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640] @@ -209,7 +209,7 @@ ; X64-NEXT: paddb %xmm0, %xmm0 ; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; X64-NEXT: movdqa %xmm1, %xmm2 -; X64-NEXT: paddb %xmm2, %xmm2 +; X64-NEXT: paddb %xmm1, %xmm2 ; X64-NEXT: paddb %xmm0, %xmm0 ; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; X64-NEXT: movdqa %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll =================================================================== --- 
llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll @@ -359,7 +359,7 @@ ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm4 -; SSE41-NEXT: paddw %xmm4, %xmm4 +; SSE41-NEXT: paddw %xmm1, %xmm4 ; SSE41-NEXT: movdqa %xmm3, %xmm6 ; SSE41-NEXT: psllw $8, %xmm6 ; SSE41-NEXT: movdqa %xmm3, %xmm5 @@ -384,7 +384,7 @@ ; SSE41-NEXT: psllw $4, %xmm2 ; SSE41-NEXT: por %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm1 -; SSE41-NEXT: paddw %xmm1, %xmm1 +; SSE41-NEXT: paddw %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm3, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm0 @@ -629,10 +629,10 @@ ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8] ; SSE41-NEXT: psubb %xmm3, %xmm2 ; SSE41-NEXT: psllw $5, %xmm3 -; SSE41-NEXT: movdqa %xmm1, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: psllw $4, %xmm5 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5 -; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: movdqa %xmm0, %xmm4 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: movdqa %xmm4, %xmm5 @@ -642,13 +642,13 @@ ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: movdqa %xmm4, %xmm5 -; SSE41-NEXT: paddb %xmm5, %xmm5 +; SSE41-NEXT: paddb %xmm4, %xmm5 ; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: psllw $5, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm2, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm5 ; SSE41-NEXT: psrlw $4, %xmm5 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5 @@ -1202,7 +1202,7 @@ ; SSE41-LABEL: constant_rotate_v16i8: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm3 ; SSE41-NEXT: psllw $4, %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256] @@ -1214,7 +1214,7 @@ ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm2, %xmm3 ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm1, %xmm3 Index: llvm/trunk/test/CodeGen/X86/vector-sext.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-sext.ll +++ llvm/trunk/test/CodeGen/X86/vector-sext.ll @@ -243,7 +243,7 @@ ; SSSE3-LABEL: sext_16i8_to_8i32: ; SSSE3: # %bb.0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: psrad $24, %xmm0 ; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7] @@ -312,7 +312,7 @@ ; SSSE3-LABEL: sext_16i8_to_16i32: ; SSSE3: # %bb.0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm3 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: psrad $24, %xmm0 ; SSSE3-NEXT: 
punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15] @@ -443,7 +443,7 @@ ; SSSE3-LABEL: sext_16i8_to_4i64: ; SSSE3: # %bb.0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: movdqa %xmm0, %xmm2 ; SSSE3-NEXT: psrad $31, %xmm2 @@ -499,7 +499,7 @@ ; SSE2-LABEL: sext_16i8_to_8i64: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: psrad $31, %xmm2 @@ -1108,7 +1108,7 @@ ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: psrad $31, %xmm3 -; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: psrad $31, %xmm4 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] @@ -1127,7 +1127,7 @@ ; SSSE3-NEXT: movdqa %xmm1, %xmm2 ; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: psrad $31, %xmm3 -; SSSE3-NEXT: movdqa %xmm2, %xmm4 +; SSSE3-NEXT: movdqa %xmm1, %xmm4 ; SSSE3-NEXT: psrad $31, %xmm4 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] Index: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll @@ -273,7 +273,7 @@ ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: paddw %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psraw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll @@ -243,7 +243,7 @@ ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: paddw %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -408,7 +408,7 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm3 ; SSE41-NEXT: psrlw $4, %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -701,7 +701,7 @@ ; SSE41-NEXT: pshufb %xmm0, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psrlw $4, %xmm4 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4 @@ -1147,7 +1147,7 @@ ; SSE41-LABEL: constant_shift_v16i8: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; 
SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psrlw $4, %xmm2 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32] Index: llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll @@ -200,7 +200,7 @@ ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: paddw %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psllw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -362,7 +362,7 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm3 ; SSE41-NEXT: psllw $4, %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -374,7 +374,7 @@ ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm2, %xmm3 ; SSE41-NEXT: paddb %xmm1, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 @@ -649,7 +649,7 @@ ; SSE41-NEXT: pshufb %xmm0, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psllw $4, %xmm4 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4 @@ -661,7 +661,7 @@ ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm1 -; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: paddb %xmm2, %xmm1 ; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2 @@ -1001,7 +1001,7 @@ ; SSE41-LABEL: constant_shift_v16i8: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psllw $4, %xmm2 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32] @@ -1012,7 +1012,7 @@ ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: paddb %xmm2, %xmm2 +; SSE41-NEXT: paddb %xmm1, %xmm2 ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll +++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll @@ -2703,7 +2703,7 @@ ; SSE-LABEL: PR22377: ; SSE: # %bb.0: # %entry ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3] ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2] ; SSE-NEXT: addps %xmm0, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] Index: llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll +++ llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll @@ -5511,7 +5511,7 @@ ; SSE-LABEL: mul_add_const_v4i64_v4i32: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = 
xmm2[0,1,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3] Index: llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll +++ llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll @@ -493,7 +493,7 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm8 ; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [2147483648,0,2147483648,0] -; SSE41-NEXT: movdqa %xmm8, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: pxor %xmm11, %xmm5 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483647,2147483647] ; SSE41-NEXT: movdqa %xmm0, %xmm6 @@ -1873,9 +1873,9 @@ ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm6, %xmm10 -; SSE2-NEXT: movdqa %xmm10, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm5, %xmm11 -; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm4, %xmm6 ; SSE2-NEXT: movdqa %xmm3, %xmm5 ; SSE2-NEXT: movdqa %xmm1, %xmm3 @@ -2103,9 +2103,9 @@ ; SSSE3: # %bb.0: ; SSSE3-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm6, %xmm10 -; SSSE3-NEXT: movdqa %xmm10, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSSE3-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm5, %xmm11 -; SSSE3-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSSE3-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm4, %xmm6 ; SSSE3-NEXT: movdqa %xmm3, %xmm5 ; SSSE3-NEXT: movdqa %xmm1, %xmm3 @@ -2332,7 +2332,7 @@ ; SSE41-LABEL: trunc_packus_v16i64_v16i8: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm7, %xmm8 -; SSE41-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm6, %xmm7 ; SSE41-NEXT: movdqa %xmm5, %xmm6 ; SSE41-NEXT: movdqa %xmm4, %xmm5 Index: llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll +++ llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll @@ -521,7 +521,7 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm8 ; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0] -; SSE41-NEXT: movdqa %xmm8, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: pxor %xmm4, %xmm5 ; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [4294967295,4294967295] ; SSE41-NEXT: movdqa %xmm10, %xmm6 @@ -997,7 +997,7 @@ ; SSE41-NEXT: pand %xmm6, %xmm5 ; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] ; SSE41-NEXT: por %xmm5, %xmm7 -; SSE41-NEXT: movdqa %xmm8, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: pxor %xmm4, %xmm5 ; SSE41-NEXT: movdqa %xmm10, %xmm6 ; SSE41-NEXT: pcmpgtd %xmm5, %xmm6 @@ -1795,7 +1795,7 @@ ; SSE41-NEXT: pand %xmm6, %xmm5 ; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] ; SSE41-NEXT: por %xmm5, %xmm7 -; SSE41-NEXT: movdqa %xmm8, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: pxor %xmm4, %xmm5 ; SSE41-NEXT: movdqa %xmm10, %xmm6 ; SSE41-NEXT: pcmpgtd %xmm5, %xmm6 @@ -1983,9 +1983,9 @@ ; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm5, %xmm8 -; SSE2-NEXT: movdqa 
%xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm4, %xmm12 -; SSE2-NEXT: movdqa %xmm12, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm5 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: movdqa %xmm1, %xmm3 @@ -2093,7 +2093,7 @@ ; SSE2-NEXT: pandn %xmm1, %xmm10 ; SSE2-NEXT: por %xmm0, %xmm10 ; SSE2-NEXT: movdqa %xmm10, %xmm5 -; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm10, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; SSE2-NEXT: pand %xmm9, %xmm0 ; SSE2-NEXT: pandn %xmm1, %xmm9 @@ -2236,9 +2236,9 @@ ; SSSE3-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm5, %xmm8 -; SSSE3-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSSE3-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm4, %xmm12 -; SSSE3-NEXT: movdqa %xmm12, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSSE3-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm3, %xmm5 ; SSSE3-NEXT: movdqa %xmm2, %xmm4 ; SSSE3-NEXT: movdqa %xmm1, %xmm3 @@ -2346,7 +2346,7 @@ ; SSSE3-NEXT: pandn %xmm1, %xmm10 ; SSSE3-NEXT: por %xmm0, %xmm10 ; SSSE3-NEXT: movdqa %xmm10, %xmm5 -; SSSE3-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSSE3-NEXT: movdqa %xmm10, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; SSSE3-NEXT: pand %xmm9, %xmm0 ; SSSE3-NEXT: pandn %xmm1, %xmm9 @@ -2488,7 +2488,7 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm6, %xmm9 -; SSE41-NEXT: movdqa %xmm9, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm3, %xmm15 @@ -2591,12 +2591,12 @@ ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm2 ; SSE41-NEXT: movapd %xmm2, %xmm4 -; SSE41-NEXT: movapd %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movapd %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movapd %xmm13, %xmm1 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: blendvpd %xmm0, -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload ; SSE41-NEXT: movapd %xmm1, %xmm2 -; SSE41-NEXT: movapd %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movapd %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movapd %xmm13, %xmm1 ; SSE41-NEXT: movdqa %xmm9, %xmm0 ; SSE41-NEXT: blendvpd %xmm0, -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload Index: llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll +++ llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll @@ -869,7 +869,7 @@ ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm1, %xmm8 ; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648] -; SSE2-NEXT: movdqa %xmm8, %xmm5 +; SSE2-NEXT: movdqa %xmm1, %xmm5 ; SSE2-NEXT: pxor %xmm7, %xmm5 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147549183,2147549183,2147549183,2147549183] ; SSE2-NEXT: movdqa %xmm1, %xmm6 @@ -913,7 +913,7 @@ ; SSSE3: # %bb.0: ; SSSE3-NEXT: movdqa %xmm1, %xmm8 ; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = 
[2147483648,2147483648,2147483648,2147483648] -; SSSE3-NEXT: movdqa %xmm8, %xmm5 +; SSSE3-NEXT: movdqa %xmm1, %xmm5 ; SSSE3-NEXT: pxor %xmm7, %xmm5 ; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [2147549183,2147549183,2147549183,2147549183] ; SSSE3-NEXT: movdqa %xmm1, %xmm6 @@ -1301,7 +1301,7 @@ ; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm4, %xmm12 -; SSE2-NEXT: movdqa %xmm12, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm4 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -1440,7 +1440,7 @@ ; SSSE3-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm4, %xmm12 -; SSSE3-NEXT: movdqa %xmm12, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSSE3-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSSE3-NEXT: movdqa %xmm3, %xmm4 ; SSSE3-NEXT: movdqa %xmm2, %xmm3 ; SSSE3-NEXT: movdqa %xmm1, %xmm2 @@ -1577,11 +1577,11 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm6, %xmm11 -; SSE41-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm5, %xmm12 -; SSE41-NEXT: movdqa %xmm12, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm4, %xmm13 -; SSE41-NEXT: movdqa %xmm13, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE41-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE41-NEXT: movdqa %xmm3, %xmm6 ; SSE41-NEXT: movdqa %xmm2, %xmm5 ; SSE41-NEXT: movdqa %xmm1, %xmm4 @@ -1592,13 +1592,13 @@ ; SSE41-NEXT: pxor %xmm9, %xmm11 ; SSE41-NEXT: pxor %xmm9, %xmm12 ; SSE41-NEXT: pxor %xmm9, %xmm13 -; SSE41-NEXT: movdqa %xmm6, %xmm14 +; SSE41-NEXT: movdqa %xmm3, %xmm14 ; SSE41-NEXT: pxor %xmm9, %xmm14 -; SSE41-NEXT: movdqa %xmm5, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: pxor %xmm9, %xmm3 -; SSE41-NEXT: movdqa %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: pxor %xmm9, %xmm2 -; SSE41-NEXT: pxor %xmm8, %xmm9 +; SSE41-NEXT: pxor %xmm0, %xmm9 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259711,9223372039002259711] ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: pcmpgtd %xmm9, %xmm0 Index: llvm/trunk/test/CodeGen/X86/vector-zext.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-zext.ll +++ llvm/trunk/test/CodeGen/X86/vector-zext.ll @@ -247,7 +247,7 @@ ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] @@ -262,7 +262,7 @@ ; SSSE3: # %bb.0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: pxor %xmm4, %xmm4 -; SSSE3-NEXT: movdqa %xmm3, %xmm1 +; SSSE3-NEXT: movdqa %xmm0, %xmm1 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = 
xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] @@ -400,7 +400,7 @@ ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSE2-NEXT: movdqa %xmm1, %xmm0 @@ -701,7 +701,7 @@ ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] @@ -716,7 +716,7 @@ ; SSSE3: # %bb.0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: pxor %xmm4, %xmm4 -; SSSE3-NEXT: movdqa %xmm3, %xmm1 +; SSSE3-NEXT: movdqa %xmm0, %xmm1 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] @@ -1583,7 +1583,7 @@ ; SSE41: # %bb.0: # %entry ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pxor %xmm2, %xmm2 -; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] ; SSE41-NEXT: retq ; @@ -1631,7 +1631,7 @@ ; SSE41: # %bb.0: # %entry ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pxor %xmm2, %xmm2 -; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero +; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; SSE41-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vselect-minmax.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vselect-minmax.ll +++ llvm/trunk/test/CodeGen/X86/vselect-minmax.ll @@ -3344,12 +3344,12 @@ ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm3, %xmm8 ; SSE2-NEXT: movdqa %xmm2, %xmm9 -; SSE2-NEXT: movdqa %xmm8, %xmm12 +; SSE2-NEXT: movdqa %xmm3, %xmm12 ; SSE2-NEXT: pcmpgtb %xmm7, %xmm12 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13 ; SSE2-NEXT: movdqa %xmm12, %xmm3 ; SSE2-NEXT: pxor %xmm13, %xmm3 -; SSE2-NEXT: movdqa %xmm9, %xmm14 +; SSE2-NEXT: movdqa %xmm2, %xmm14 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14 ; SSE2-NEXT: movdqa %xmm14, %xmm2 ; SSE2-NEXT: pxor %xmm13, %xmm2 @@ -3487,12 +3487,12 @@ ; SSE2-NEXT: movdqa %xmm2, %xmm9 ; SSE2-NEXT: movdqa %xmm0, %xmm10 ; SSE2-NEXT: movdqa %xmm7, %xmm12 -; SSE2-NEXT: pcmpgtb %xmm8, %xmm12 +; SSE2-NEXT: pcmpgtb %xmm3, %xmm12 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE2-NEXT: movdqa %xmm12, %xmm3 ; SSE2-NEXT: pxor %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm6, %xmm13 -; SSE2-NEXT: pcmpgtb %xmm9, %xmm13 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm13 ; SSE2-NEXT: movdqa %xmm13, %xmm2 ; SSE2-NEXT: pxor %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm5, %xmm14 @@ -4225,12 +4225,12 @@ ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm3, %xmm8 ; SSE2-NEXT: movdqa %xmm2, %xmm9 -; SSE2-NEXT: movdqa %xmm8, %xmm12 +; SSE2-NEXT: movdqa %xmm3, %xmm12 ; SSE2-NEXT: pcmpgtd %xmm7, 
%xmm12 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13 ; SSE2-NEXT: movdqa %xmm12, %xmm3 ; SSE2-NEXT: pxor %xmm13, %xmm3 -; SSE2-NEXT: movdqa %xmm9, %xmm14 +; SSE2-NEXT: movdqa %xmm2, %xmm14 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14 ; SSE2-NEXT: movdqa %xmm14, %xmm2 ; SSE2-NEXT: pxor %xmm13, %xmm2 @@ -4368,12 +4368,12 @@ ; SSE2-NEXT: movdqa %xmm2, %xmm9 ; SSE2-NEXT: movdqa %xmm0, %xmm10 ; SSE2-NEXT: movdqa %xmm7, %xmm12 -; SSE2-NEXT: pcmpgtd %xmm8, %xmm12 +; SSE2-NEXT: pcmpgtd %xmm3, %xmm12 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE2-NEXT: movdqa %xmm12, %xmm3 ; SSE2-NEXT: pxor %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm6, %xmm13 -; SSE2-NEXT: pcmpgtd %xmm9, %xmm13 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm13 ; SSE2-NEXT: movdqa %xmm13, %xmm2 ; SSE2-NEXT: pxor %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm5, %xmm14 @@ -4890,7 +4890,7 @@ ; SSE2-LABEL: test122: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm8 -; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -5164,7 +5164,7 @@ ; SSE2-LABEL: test124: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm11 -; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -5467,7 +5467,7 @@ ; SSE2-LABEL: test126: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm8 -; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -5795,7 +5795,7 @@ ; SSE2-LABEL: test128: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm11 -; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -6047,7 +6047,7 @@ ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13 ; SSE2-NEXT: movdqa %xmm12, %xmm9 ; SSE2-NEXT: pxor %xmm13, %xmm9 -; SSE2-NEXT: movdqa %xmm8, %xmm14 +; SSE2-NEXT: movdqa %xmm2, %xmm14 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14 ; SSE2-NEXT: movdqa %xmm14, %xmm2 ; SSE2-NEXT: pxor %xmm13, %xmm2 @@ -6190,7 +6190,7 @@ ; SSE2-NEXT: movdqa %xmm12, %xmm9 ; SSE2-NEXT: pxor %xmm0, %xmm9 ; SSE2-NEXT: movdqa %xmm6, %xmm13 -; SSE2-NEXT: pcmpgtb %xmm8, %xmm13 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm13 ; SSE2-NEXT: movdqa %xmm13, %xmm2 ; SSE2-NEXT: pxor %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm5, %xmm14 @@ -6941,7 +6941,7 @@ ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13 ; SSE2-NEXT: movdqa %xmm12, %xmm9 ; SSE2-NEXT: pxor %xmm13, %xmm9 -; SSE2-NEXT: movdqa %xmm8, %xmm14 +; SSE2-NEXT: movdqa %xmm2, %xmm14 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14 ; SSE2-NEXT: movdqa %xmm14, %xmm2 ; SSE2-NEXT: pxor %xmm13, %xmm2 @@ -7084,7 +7084,7 @@ ; SSE2-NEXT: movdqa %xmm12, %xmm9 ; SSE2-NEXT: pxor %xmm0, %xmm9 ; SSE2-NEXT: movdqa %xmm6, %xmm13 -; SSE2-NEXT: pcmpgtd %xmm8, %xmm13 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm13 ; SSE2-NEXT: movdqa %xmm13, %xmm2 ; SSE2-NEXT: pxor %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm5, %xmm14 @@ -7610,7 +7610,7 @@ ; SSE2-LABEL: test154: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm8 -; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, 
%xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -7882,7 +7882,7 @@ ; SSE2-LABEL: test156: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm11 -; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -8183,7 +8183,7 @@ ; SSE2-LABEL: test158: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm8 -; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -8509,7 +8509,7 @@ ; SSE2-LABEL: test160: ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm7, %xmm11 -; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm1, %xmm2 @@ -10289,7 +10289,7 @@ ; SSE4: # %bb.0: # %entry ; SSE4-NEXT: movdqa %xmm0, %xmm2 ; SSE4-NEXT: movdqa %xmm1, %xmm3 -; SSE4-NEXT: pcmpgtq %xmm2, %xmm3 +; SSE4-NEXT: pcmpgtq %xmm0, %xmm3 ; SSE4-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE4-NEXT: pxor %xmm3, %xmm0 ; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1 @@ -10768,7 +10768,7 @@ ; SSE4: # %bb.0: # %entry ; SSE4-NEXT: movdqa %xmm0, %xmm2 ; SSE4-NEXT: movdqa %xmm1, %xmm3 -; SSE4-NEXT: pcmpgtq %xmm2, %xmm3 +; SSE4-NEXT: pcmpgtq %xmm0, %xmm3 ; SSE4-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE4-NEXT: pxor %xmm3, %xmm0 ; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2 Index: llvm/trunk/test/CodeGen/X86/widen_conv-3.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/widen_conv-3.ll +++ llvm/trunk/test/CodeGen/X86/widen_conv-3.ll @@ -74,7 +74,7 @@ ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 ; X86-SSE2-NEXT: movss %xmm0, (%eax) ; X86-SSE2-NEXT: movaps %xmm0, %xmm1 -; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; X86-SSE2-NEXT: movss %xmm1, 8(%eax) ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; X86-SSE2-NEXT: movss %xmm0, 4(%eax) Index: llvm/trunk/test/CodeGen/X86/widen_conv-4.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/widen_conv-4.ll +++ llvm/trunk/test/CodeGen/X86/widen_conv-4.ll @@ -19,7 +19,7 @@ ; X86-SSE2-NEXT: movups %xmm0, (%eax) ; X86-SSE2-NEXT: movss %xmm2, 16(%eax) ; X86-SSE2-NEXT: movaps %xmm2, %xmm0 -; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1] ; X86-SSE2-NEXT: movss %xmm0, 24(%eax) ; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3] ; X86-SSE2-NEXT: movss %xmm2, 20(%eax) @@ -100,7 +100,7 @@ ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 ; X86-SSE2-NEXT: movss %xmm0, (%eax) ; X86-SSE2-NEXT: movaps %xmm0, %xmm1 -; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; X86-SSE2-NEXT: movss %xmm1, 8(%eax) ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; X86-SSE2-NEXT: movss %xmm0, 4(%eax) Index: llvm/trunk/test/CodeGen/X86/win64_frame.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/win64_frame.ll +++ llvm/trunk/test/CodeGen/X86/win64_frame.ll @@ -238,7 +238,7 @@ ; PUSHF-NEXT: .seh_setframe 5, 32 ; PUSHF-NEXT: .seh_endprologue ; PUSHF-NEXT: movq %rdx, %rsi -; PUSHF-NEXT: movq %rsi, %rax +; PUSHF-NEXT: movq %rdx, %rax ; 
PUSHF-NEXT: lock cmpxchgq %r8, (%rcx) ; PUSHF-NEXT: pushfq ; PUSHF-NEXT: popq %rdi @@ -269,7 +269,7 @@ ; SAHF-NEXT: .seh_setframe 5, 32 ; SAHF-NEXT: .seh_endprologue ; SAHF-NEXT: movq %rdx, %rsi -; SAHF-NEXT: movq %rsi, %rax +; SAHF-NEXT: movq %rdx, %rax ; SAHF-NEXT: lock cmpxchgq %r8, (%rcx) ; SAHF-NEXT: seto %al ; SAHF-NEXT: lahf Index: llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll +++ llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll @@ -1751,7 +1751,7 @@ ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15] ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3] ; AVX1-NEXT: vmovdqa %xmm8, %xmm1 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3] ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm11, %ymm0 ; AVX1-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3] Index: llvm/trunk/test/CodeGen/X86/x86-shrink-wrap-unwind.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/x86-shrink-wrap-unwind.ll +++ llvm/trunk/test/CodeGen/X86/x86-shrink-wrap-unwind.ll @@ -23,7 +23,7 @@ ; Compare the arguments and jump to exit. ; After the prologue is set. ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]] -; CHECK-NEXT: cmpl %esi, [[ARG0CPY]] +; CHECK-NEXT: cmpl %esi, %edi ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]] ; ; Store %a in the alloca. @@ -69,7 +69,7 @@ ; Compare the arguments and jump to exit. ; After the prologue is set. ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]] -; CHECK-NEXT: cmpl %esi, [[ARG0CPY]] +; CHECK-NEXT: cmpl %esi, %edi ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]] ; ; Prologue code. @@ -115,7 +115,7 @@ ; Compare the arguments and jump to exit. ; After the prologue is set. ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]] -; CHECK-NEXT: cmpl %esi, [[ARG0CPY]] +; CHECK-NEXT: cmpl %esi, %edi ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]] ; ; Prologue code. Index: llvm/trunk/test/CodeGen/X86/x86-shrink-wrapping.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/x86-shrink-wrapping.ll +++ llvm/trunk/test/CodeGen/X86/x86-shrink-wrapping.ll @@ -17,7 +17,7 @@ ; Compare the arguments and jump to exit. ; No prologue needed. ; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]] -; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]] +; ENABLE-NEXT: cmpl %esi, %edi ; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]] ; ; Prologue code. @@ -27,7 +27,7 @@ ; Compare the arguments and jump to exit. ; After the prologue is set. ; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]] -; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]] +; DISABLE-NEXT: cmpl %esi, %edi ; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]] ; ; Store %a in the alloca. 
Index: llvm/trunk/test/DebugInfo/COFF/fpo-shrink-wrap.ll
===================================================================
--- llvm/trunk/test/DebugInfo/COFF/fpo-shrink-wrap.ll
+++ llvm/trunk/test/DebugInfo/COFF/fpo-shrink-wrap.ll
@@ -15,7 +15,7 @@
 ; ASM: .cv_fpo_proc @shrink_wrap_basic@16 8
 ; ASM: .cv_loc 0 1 3 9 # t.c:3:9
 ; ASM: movl %ecx, %eax
-; ASM: cmpl %edx, %eax
+; ASM: cmpl %edx, %ecx
 ; ASM: jl [[EPILOGUE:LBB0_[0-9]+]]
 ; ASM: pushl %ebx
Index: llvm/trunk/test/DebugInfo/X86/spill-nospill.ll
===================================================================
--- llvm/trunk/test/DebugInfo/X86/spill-nospill.ll
+++ llvm/trunk/test/DebugInfo/X86/spill-nospill.ll
@@ -30,7 +30,7 @@
 ; CHECK: callq g
 ; CHECK: movl %eax, %[[CSR:[^ ]*]]
 ; CHECK: #DEBUG_VALUE: f:y <- $esi
-; CHECK: movl %[[CSR]], %ecx
+; CHECK: movl %eax, %ecx
 ; CHECK: callq g
 ; CHECK: movl %[[CSR]], %ecx
 ; CHECK: callq g