Index: include/llvm/CodeGen/Passes.h
===================================================================
--- include/llvm/CodeGen/Passes.h
+++ include/llvm/CodeGen/Passes.h
@@ -264,6 +264,10 @@
   /// MachineSinking - This pass performs sinking on machine instructions.
   extern char &MachineSinkingID;
 
+  /// MachineCopyForwarding - This pass performs copy forwarding on
+  /// machine instructions.
+  extern char &MachineCopyForwardingID;
+
   /// MachineCopyPropagation - This pass performs copy propagation on
   /// machine instructions.
   extern char &MachineCopyPropagationID;
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -229,6 +229,7 @@
 void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
 void initializeMachineCSEPass(PassRegistry&);
 void initializeMachineCombinerPass(PassRegistry &);
+void initializeMachineCopyForwardingPass(PassRegistry&);
 void initializeMachineCopyPropagationPass(PassRegistry&);
 void initializeMachineDominanceFrontierPass(PassRegistry&);
 void initializeMachineDominatorTreePass(PassRegistry&);
Index: lib/CodeGen/CMakeLists.txt
===================================================================
--- lib/CodeGen/CMakeLists.txt
+++ lib/CodeGen/CMakeLists.txt
@@ -61,6 +61,7 @@
   MachineBlockPlacement.cpp
   MachineBranchProbabilityInfo.cpp
   MachineCombiner.cpp
+  MachineCopyForwarding.cpp
   MachineCopyPropagation.cpp
   MachineCSE.cpp
   MachineDominanceFrontier.cpp
Index: lib/CodeGen/CodeGen.cpp
===================================================================
--- lib/CodeGen/CodeGen.cpp
+++ lib/CodeGen/CodeGen.cpp
@@ -51,6 +51,7 @@
   initializeMachineCSEPass(Registry);
   initializeImplicitNullChecksPass(Registry);
   initializeMachineCombinerPass(Registry);
+  initializeMachineCopyForwardingPass(Registry);
   initializeMachineCopyPropagationPass(Registry);
   initializeMachineDominatorTreePass(Registry);
   initializeMachineFunctionPrinterPassPass(Registry);
Index: lib/CodeGen/MachineCopyForwarding.cpp
===================================================================
--- /dev/null
+++ lib/CodeGen/MachineCopyForwarding.cpp
@@ -0,0 +1,320 @@
+//===- MachineCopyForwarding.cpp - Machine Copy Forwarding Pass ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a simple MachineInstr-level copy forwarding pass. It is meant to
+// run after register allocation, but before VirtRegRewriter has replaced
+// virtual registers with their assigned physical registers. It forwards the
+// source of COPYs to the users of their destinations when doing so is legal,
+// but otherwise leaves the program unchanged. For example:
+//
+//   %vreg1 = COPY %vreg0
+//   ...
+//   ... = OP %vreg1
+//
+// If
+//  - the physical register assigned to %vreg0 has not been clobbered by the
+//    time of the use of %vreg1,
+//  - the register class constraints are satisfied, and
+//  - the COPY def is the only value that reaches OP,
+// then this pass replaces the above with:
+//
+//   %vreg1 = COPY %vreg0
+//   ...
+//   ... = OP %vreg0
+//
+// and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
+//
+// This pass should not be confused with the mis-named MachineCopyPropagation
+// pass, which doesn't do copy propagation, but rather removes redundant/dead
+// copies.
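+//
+// As an illustrative sketch (the physical register assignment below is
+// hypothetical, not taken from this patch), forwarding must be rejected when
+// the source is clobbered between the COPY and the use:
+//
+//   %vreg1 = COPY %vreg0   ; suppose %vreg0 is assigned to %EAX
+//   %EAX = ...             ; clobbers the physical register of the source
+//   ... = OP %vreg1        ; must keep using %vreg1, not %vreg0
+//
+// The CopyMap bookkeeping below exists to catch exactly this situation.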
+//===----------------------------------------------------------------------===//
+
+#include "LiveDebugVariables.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "machine-copyfwd"
+
+STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
+STATISTIC(NumDeadCopies, "Number of dead copies removed");
+
+namespace {
+class MachineCopyForwarding : public MachineFunctionPass {
+  const TargetRegisterInfo *TRI;
+  const TargetInstrInfo *TII;
+  const MachineRegisterInfo *MRI;
+  SlotIndexes *Indexes;
+  LiveIntervals *LIS;
+  const VirtRegMap *VRM;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+  MachineCopyForwarding() : MachineFunctionPass(ID) {
+    initializeMachineCopyForwardingPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<LiveIntervals>();
+    AU.addPreserved<LiveIntervals>();
+    AU.addRequired<SlotIndexes>();
+    AU.addPreserved<SlotIndexes>();
+    AU.addRequired<VirtRegMap>();
+    AU.addPreserved<VirtRegMap>();
+    AU.addPreserved<LiveDebugVariables>();
+    AU.addPreserved<LiveStacks>();
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoPHIs);
+  }
+
+private:
+  void CopyForwardBlock(MachineBasicBlock &MBB);
+
+  /// Map from a physical register R to the currently available COPY that
+  /// defines R.
+  DenseMap<unsigned, MachineInstr *> CopyMap;
+  SmallPtrSet<LiveInterval *, 16> ShrinkLIs;
+  bool Changed;
+};
+} // namespace
+
+char MachineCopyForwarding::ID = 0;
+char &llvm::MachineCopyForwardingID = MachineCopyForwarding::ID;
+
+INITIALIZE_PASS_BEGIN(MachineCopyForwarding, "machine-copyfwd",
+                      "Machine Copy Forwarding Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(MachineCopyForwarding, "machine-copyfwd",
+                    "Machine Copy Forwarding Pass", false, false)
+
+void MachineCopyForwarding::CopyForwardBlock(MachineBasicBlock &MBB) {
+  DEBUG(dbgs() << "MCF: CopyForwardBlock " << MBB.getName() << "\n");
+
+  auto getPhysReg = [&](unsigned Reg) {
+    return TargetRegisterInfo::isVirtualRegister(Reg) ? VRM->getPhys(Reg)
+                                                      : Reg;
+  };
+
+  auto clobberCopies = [&](unsigned DefPhysReg) {
+    // This could be sped up in the case where there are a lot of active COPYs
+    // by keeping a reverse multi-map from source reg to active dest regs, but
+    // the number of active COPYs is almost always very small and is bounded
+    // by the number of physical registers, so just do this the "slow" way
+    // for now.
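+    //
+    // A rough sketch of that alternative (hypothetical, not implemented
+    // here) would be a reverse map such as
+    //   DenseMap<unsigned, SmallVector<unsigned, 2>> SrcToDstPhysRegs;
+    // keyed by source phys reg, so that a clobber of R only has to visit
+    // SrcToDstPhysRegs[R] plus CopyMap[R] itself. Overlapping/aliasing
+    // registers make that bookkeeping subtle, which is another reason to
+    // prefer the linear scan below.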
+    for (auto I = CopyMap.begin(), E = CopyMap.end(); I != E;) {
+      auto Next = std::next(I);
+      const MachineInstr &Copy = *I->second;
+      if (TRI->regsOverlap(DefPhysReg,
+                           getPhysReg(Copy.getOperand(0).getReg())) ||
+          TRI->regsOverlap(DefPhysReg,
+                           getPhysReg(Copy.getOperand(1).getReg())))
+        CopyMap.erase(I);
+      I = Next;
+    }
+  };
+
+  for (MachineInstr &MI : MBB) {
+    if (!CopyMap.empty()) {
+      // Forget about previous COPYs we have seen whose source or
+      // destination reg is clobbered by an earlyclobber.
+      for (const MachineOperand &MO : MI.operands())
+        if (MO.isReg() && MO.isEarlyClobber())
+          clobberCopies(getPhysReg(MO.getReg()));
+
+      // Look for non-tied explicit vreg uses that have an active COPY
+      // instruction that defines the physical register allocated to them.
+      // Replace the vreg with the source of the active COPY.
+      for (MachineOperand &MOUse : MI.explicit_uses()) {
+        if (!MOUse.isReg() || MOUse.isTied())
+          continue;
+
+        unsigned UseVReg = MOUse.getReg();
+        if (!TargetRegisterInfo::isVirtualRegister(UseVReg))
+          continue;
+        unsigned UseReg = VRM->getPhys(UseVReg);
+
+        auto CI = CopyMap.find(UseReg);
+        if (CI == CopyMap.end())
+          continue;
+
+        MachineInstr &Copy = *CI->second;
+        MachineOperand &CopySrc = Copy.getOperand(1);
+        unsigned CopySrcReg = CopySrc.getReg();
+        DEBUG(dbgs() << "MCF: Replacing "
+                     << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
+                     << " with "
+                     << PrintReg(CopySrcReg, TRI, CopySrc.getSubReg())
+                     << " in: " << MI);
+
+        if (MOUse.getSubReg() &&
+            TargetRegisterInfo::isPhysicalRegister(CopySrcReg)) {
+          CopySrcReg = TRI->getSubReg(CopySrcReg, MOUse.getSubReg());
+          MOUse.setSubReg(0);
+        }
+        MOUse.setReg(CopySrcReg);
+
+        // Extend live range starting from COPY early-clobber slot, since
+        // that is where the original src live range ends.
+        SlotIndex CopyUseIdx = Indexes->getInstructionIndex(Copy).getRegSlot(
+            true /*=EarlyClobber*/);
+        SlotIndex UseIdx = Indexes->getInstructionIndex(MI).getRegSlot();
+        if (TargetRegisterInfo::isVirtualRegister(CopySrcReg)) {
+          LiveInterval &LI = LIS->getInterval(CopySrcReg);
+          LI.extendInBlock(CopyUseIdx, UseIdx);
+          for (auto &S : LI.subranges())
+            if (S.find(CopyUseIdx) != S.end())
+              S.extendInBlock(CopyUseIdx, UseIdx);
+        } else {
+          for (MCRegUnitIterator UI(CopySrcReg, TRI); UI.isValid(); ++UI) {
+            LiveRange &LR = LIS->getRegUnit(*UI);
+            LR.extendInBlock(CopyUseIdx, UseIdx);
+          }
+        }
+
+        ShrinkLIs.insert(&LIS->getInterval(UseVReg));
+        ++NumCopyForwards;
+        Changed = true;
+      }
+
+      // Forget about previous COPYs we have seen whose source or
+      // destination reg is clobbered.
+      for (const MachineOperand &MO : MI.operands()) {
+        if (MO.isRegMask()) {
+          for (auto I = CopyMap.begin(), E = CopyMap.end(); I != E;) {
+            auto Next = std::next(I);
+            const MachineInstr &Copy = *I->second;
+            if (MO.clobbersPhysReg(getPhysReg(Copy.getOperand(0).getReg())) ||
+                MO.clobbersPhysReg(getPhysReg(Copy.getOperand(1).getReg())))
+              CopyMap.erase(I);
+            I = Next;
+          }
+        }
+
+        if (MO.isReg() && MO.isDef())
+          clobberCopies(getPhysReg(MO.getReg()));
+      }
+    }
+
+    // Check whether a COPY should be tracked; if so, return the destination
+    // physical register of the COPY, otherwise return 0.
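+    // Summarizing the checks below: the COPY must be a full copy whose
+    // destination is a virtual register; the source and destination register
+    // classes must be compatible; the source physical register must be
+    // contained in the destination register class and must not be reserved
+    // (unless it is a constant physical register); and the source and
+    // destination physical registers must differ, since forwarding a COPY
+    // onto itself would change nothing.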
+    auto checkCopy = [&](const MachineInstr &Copy) -> unsigned {
+      if (!Copy.isFullCopy())
+        return 0;
+
+      unsigned DstReg = Copy.getOperand(0).getReg();
+      if (!TargetRegisterInfo::isVirtualRegister(DstReg))
+        return 0;
+
+      const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
+
+      unsigned SrcReg = Copy.getOperand(1).getReg();
+      unsigned SrcPhysReg;
+      if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+        const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
+        if (!SrcRC->hasSubClassEq(DstRC) && !DstRC->hasSubClassEq(SrcRC))
+          return 0;
+        SrcPhysReg = VRM->getPhys(SrcReg);
+      } else {
+        SrcPhysReg = SrcReg;
+      }
+
+      if (!DstRC->contains(SrcPhysReg))
+        return 0;
+
+      if (MRI->isReserved(SrcPhysReg) && !MRI->isConstantPhysReg(SrcPhysReg))
+        return 0;
+
+      unsigned DstPhysReg = VRM->getPhys(DstReg);
+      // There is no point in tracking this COPY since the source and
+      // destination are the same, so forwarding the source wouldn't
+      // change anything.
+      if (DstPhysReg == SrcPhysReg)
+        return 0;
+
+      return DstPhysReg;
+    };
+
+    if (unsigned DstPhysReg = checkCopy(MI)) {
+      assert(CopyMap.count(DstPhysReg) == 0 &&
+             "CopyMap should not have entry for DstPhysReg");
+      CopyMap[DstPhysReg] = &MI;
+    }
+  }
+
+  CopyMap.clear();
+}
+
+bool MachineCopyForwarding::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
+  Changed = false;
+
+  TRI = MF.getSubtarget().getRegisterInfo();
+  TII = MF.getSubtarget().getInstrInfo();
+  MRI = &MF.getRegInfo();
+  Indexes = &getAnalysis<SlotIndexes>();
+  LIS = &getAnalysis<LiveIntervals>();
+  VRM = &getAnalysis<VirtRegMap>();
+
+  for (MachineBasicBlock &MBB : MF)
+    CopyForwardBlock(MBB);
+
+  // Shrink live ranges that have been affected by copy forwarding and remove
+  // any COPYs that have been made dead by forwarding to all of their uses.
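+  // For example, once every use of %vreg1 from the example in the file
+  // header comment has been rewritten to %vreg0, shrinkToUses() reports the
+  // COPY itself as dead, and it is deleted (or downgraded to a KILL) below.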
+  SmallVector<MachineInstr *, 8> DeadInsts;
+  for (LiveInterval *LI : ShrinkLIs) {
+    DEBUG(dbgs() << "MCF: Shrinking " << *LI << "\n");
+    LIS->shrinkToUses(LI, &DeadInsts);
+
+    for (MachineInstr *DeadInst : DeadInsts) {
+      MachineInstr &DeadCopy = *DeadInst;
+      DEBUG(dbgs() << "MCF: Dead copy: " << DeadCopy);
+      assert(DeadCopy.isCopy() &&
+             "Unexpected dead inst after shrinking live interval.");
+
+      if (DeadCopy.getOperand(0).isUndef() || DeadCopy.getNumOperands() > 2) {
+        DeadCopy.setDesc(TII->get(TargetOpcode::KILL));
+        DEBUG(dbgs() << "  replace by: " << DeadCopy);
+      } else {
+        Indexes->removeMachineInstrFromMaps(DeadCopy);
+        DeadCopy.eraseFromParent();
+        DEBUG(dbgs() << "  deleted.\n");
+      }
+      ++NumDeadCopies;
+      Changed = true;
+    }
+    DeadInsts.clear();
+  }
+  ShrinkLIs.clear();
+
+  return Changed;
+}
Index: lib/CodeGen/TargetPassConfig.cpp
===================================================================
--- lib/CodeGen/TargetPassConfig.cpp
+++ lib/CodeGen/TargetPassConfig.cpp
@@ -74,6 +74,8 @@
     cl::Hidden, cl::desc("Disable ConstantHoisting"));
 static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
     cl::desc("Disable Codegen Prepare"));
+static cl::opt<bool> DisableCopyFwd("disable-copyfwd", cl::Hidden,
+    cl::desc("Disable Copy Forwarding pass"));
 static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
     cl::desc("Disable Copy Propagation pass"));
 static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
@@ -192,6 +194,9 @@
   if (StandardID == &MachineSinkingID)
     return applyDisable(TargetID, DisableMachineSink);
 
+  if (StandardID == &MachineCopyForwardingID)
+    return applyDisable(TargetID, DisableCopyFwd);
+
   if (StandardID == &MachineCopyPropagationID)
     return applyDisable(TargetID, DisableCopyProp);
 
@@ -856,6 +861,9 @@
   // Allow targets to change the register assignments before rewriting.
   addPreRewrite();
 
+  // Copy forwarding.
+  addPass(&MachineCopyForwardingID);
+
   // Finally rewrite virtual registers.
addPass(&VirtRegRewriterID); Index: test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll =================================================================== --- test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll +++ test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll @@ -4,8 +4,10 @@ define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp { entry: ; CHECK-LABEL: t: -; CHECK: mov x0, [[REG1:x[0-9]+]] -; CHECK: mov x1, [[REG2:x[0-9]+]] +; CHECK: mov [[REG2:x[0-9]+]], x3 +; CHECK: mov [[REG1:x[0-9]+]], x2 +; CHECK: mov x0, x2 +; CHECK: mov x1, x3 ; CHECK: bl _foo ; CHECK: mov x0, [[REG1]] ; CHECK: mov x1, [[REG2]] Index: test/CodeGen/AArch64/f16-instructions.ll =================================================================== --- test/CodeGen/AArch64/f16-instructions.ll +++ test/CodeGen/AArch64/f16-instructions.ll @@ -350,7 +350,7 @@ ; CHECK-LABEL: test_phi: ; CHECK: mov x[[PTR:[0-9]+]], x0 -; CHECK: ldr h[[AB:[0-9]+]], [x[[PTR]]] +; CHECK: ldr h[[AB:[0-9]+]], [x0] ; CHECK: [[LOOP:LBB[0-9_]+]]: ; CHECK: mov.16b v[[R:[0-9]+]], v[[AB]] ; CHECK: ldr h[[AB]], [x[[PTR]]] Index: test/CodeGen/AArch64/flags-multiuse.ll =================================================================== --- test/CodeGen/AArch64/flags-multiuse.ll +++ test/CodeGen/AArch64/flags-multiuse.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs -o - %s | FileCheck %s ; LLVM should be able to cope with multiple uses of the same flag-setting ; instruction at different points of a routine. Either by rematerializing the @@ -12,7 +12,9 @@ ; CHECK-LABEL: test_multiflag: %test = icmp ne i32 %n, %m -; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]] +; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS:w[0-9]+]] +; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS:w[0-9]+]] +; CHECK: cmp [[LHS]], [[RHS]] %val = zext i1 %test to i32 ; CHECK: cset {{[xw][0-9]+}}, ne @@ -25,7 +27,7 @@ ; Currently, the comparison is emitted again. An MSR/MRS pair would also be ; acceptable, but assuming the call preserves NZCV is not. 
br i1 %test, label %iftrue, label %iffalse -; CHECK: cmp [[LHS]], [[RHS]] +; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]] ; CHECK: b.eq iftrue: Index: test/CodeGen/AArch64/merge-store-dependency.ll =================================================================== --- test/CodeGen/AArch64/merge-store-dependency.ll +++ test/CodeGen/AArch64/merge-store-dependency.ll @@ -8,10 +8,9 @@ define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) { ;CHECK-LABEL: test entry: -; A53: mov [[DATA:w[0-9]+]], w1 ; A53: str q{{[0-9]+}}, {{.*}} ; A53: str q{{[0-9]+}}, {{.*}} -; A53: str [[DATA]], {{.*}} +; A53: str w1, {{.*}} %0 = bitcast %struct1* %fde to i8* tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false) Index: test/CodeGen/AArch64/neg-imm.ll =================================================================== --- test/CodeGen/AArch64/neg-imm.ll +++ test/CodeGen/AArch64/neg-imm.ll @@ -7,8 +7,8 @@ define void @test(i32 %px) { ; CHECK_LABEL: test: ; CHECK_LABEL: %entry -; CHECK: subs -; CHECK-NEXT: csel +; CHECK: subs [[REG0:w[0-9]+]], +; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]] entry: %sub = add nsw i32 %px, -1 %cmp = icmp slt i32 %px, 1 Index: test/CodeGen/AMDGPU/ret.ll =================================================================== --- test/CodeGen/AMDGPU/ret.ll +++ test/CodeGen/AMDGPU/ret.ll @@ -2,10 +2,10 @@ ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s ; GCN-LABEL: {{^}}vgpr: -; GCN: v_mov_b32_e32 v1, v0 -; GCN-DAG: v_add_f32_e32 v0, 1.0, v1 -; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm +; GCN-DAG: v_mov_b32_e32 v1, v0 +; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm ; GCN: s_waitcnt expcnt(0) +; GCN: v_add_f32_e32 v0, 1.0, v0 ; GCN-NOT: s_endpgm define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 { bb: @@ -204,13 +204,13 @@ } ; GCN-LABEL: {{^}}both: -; GCN: v_mov_b32_e32 v1, v0 -; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm -; GCN-DAG: v_add_f32_e32 v0, 1.0, v1 -; GCN-DAG: s_add_i32 s0, s3, 2 +; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm +; GCN-DAG: v_mov_b32_e32 v1, v0 ; GCN-DAG: s_mov_b32 s1, s2 -; GCN: s_mov_b32 s2, s3 ; GCN: s_waitcnt expcnt(0) +; GCN: v_add_f32_e32 v0, 1.0, v0 +; GCN-DAG: s_add_i32 s0, s3, 2 +; GCN-DAG: s_mov_b32 s2, s3 ; GCN-NOT: s_endpgm define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 { bb: Index: test/CodeGen/ARM/atomic-op.ll =================================================================== --- test/CodeGen/ARM/atomic-op.ll +++ test/CodeGen/ARM/atomic-op.ll @@ -272,7 +272,8 @@ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic %oldval = extractvalue { i32, i1 } %pair, 0 -; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]] +; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0 +; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0] ; CHECK-ARMV7: cmp [[OLDVAL]], r1 ; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]] ; CHECK-ARMV7: dmb ish @@ -290,7 +291,8 @@ ; CHECK-ARMV7: dmb ish ; CHECK-ARMV7: bx lr -; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]] +; CHECK-T2: mov r[[ADDR:[0-9]+]], r0 +; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0] ; CHECK-T2: cmp [[OLDVAL]], r1 ; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]] ; CHECK-T2: dmb ish Index: test/CodeGen/ARM/swifterror.ll =================================================================== --- test/CodeGen/ARM/swifterror.ll +++ test/CodeGen/ARM/swifterror.ll @@ -181,7 
+181,7 @@ ; CHECK-APPLE: beq ; CHECK-APPLE: mov r0, #16 ; CHECK-APPLE: malloc -; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8] +; CHECK-APPLE: strb r{{.*}}, [r0, #8] ; CHECK-APPLE: ble ; CHECK-APPLE: mov r8, [[ID]] Index: test/CodeGen/Hexagon/pred-absolute-store.ll =================================================================== --- test/CodeGen/Hexagon/pred-absolute-store.ll +++ test/CodeGen/Hexagon/pred-absolute-store.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; Check that we are able to predicate instructions with abosolute ; addressing mode. -; CHECK: if ({{!*}}p{{[0-2]}}.new) memw(##gvar) = r{{[0-9]+}} +; CHECK: if ({{!*}}p{{[0-2]}}) memw(##gvar) = r{{[0-9]+}} @gvar = external global i32 define i32 @test2(i32 %a, i32 %b) nounwind { Index: test/CodeGen/PowerPC/fma-mutate.ll =================================================================== --- test/CodeGen/PowerPC/fma-mutate.ll +++ test/CodeGen/PowerPC/fma-mutate.ll @@ -14,7 +14,8 @@ ret double %r ; CHECK: @foo3 -; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]] +; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]] +; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]] ; CHECK: xsmaddmdp ; CHECK: xsmaddadp } Index: test/CodeGen/PowerPC/inlineasm-i64-reg.ll =================================================================== --- test/CodeGen/PowerPC/inlineasm-i64-reg.ll +++ test/CodeGen/PowerPC/inlineasm-i64-reg.ll @@ -75,7 +75,7 @@ ; CHECK-DAG: mr [[REG:[0-9]+]], 3 ; CHECK-DAG: li 0, 1076 -; CHECK: stw [[REG]], +; CHECK-DAG: stw 3, ; CHECK: #APP ; CHECK: sc Index: test/CodeGen/SPARC/atomics.ll =================================================================== --- test/CodeGen/SPARC/atomics.ll +++ test/CodeGen/SPARC/atomics.ll @@ -235,8 +235,9 @@ ; CHECK-LABEL: test_load_add_i32 ; CHECK: membar -; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]] -; CHECK: cas [%o0], [[V]], [[U]] +; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]] +; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]] +; CHECK: cas [%o0], [[V]], [[V2]] ; CHECK: membar define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) { entry: Index: test/CodeGen/Thumb/thumb-shrink-wrapping.ll =================================================================== --- test/CodeGen/Thumb/thumb-shrink-wrapping.ll +++ test/CodeGen/Thumb/thumb-shrink-wrapping.ll @@ -603,7 +603,7 @@ define i32 @b_to_bx(i32 %value) { ; CHECK-LABEL: b_to_bx: ; DISABLE: push {r7, lr} -; CHECK: cmp r1, #49 +; CHECK: cmp r0, #49 ; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]] ; ENABLE: push {r7, lr} Index: test/CodeGen/X86/arg-copy-elide.ll =================================================================== --- test/CodeGen/X86/arg-copy-elide.ll +++ test/CodeGen/X86/arg-copy-elide.ll @@ -106,7 +106,7 @@ ; CHECK-DAG: movl %edx, %[[r1:[^ ]*]] ; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]] ; CHECK-DAG: movl %[[r2]], 4(%esp) -; CHECK-DAG: movl %[[r1]], (%esp) +; CHECK-DAG: movl %edx, (%esp) ; CHECK: movl %esp, %[[reg:[^ ]*]] ; CHECK: pushl %[[reg]] ; CHECK: calll _addrof_i64 Index: test/CodeGen/X86/avx512-bugfix-25270.ll =================================================================== --- test/CodeGen/X86/avx512-bugfix-25270.ll +++ test/CodeGen/X86/avx512-bugfix-25270.ll @@ -9,10 +9,10 @@ ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: subq $112, %rsp ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: vmovups (%rbx), %zmm0 +; CHECK-NEXT: vmovups (%rdi), %zmm0 ; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill ; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1 -; CHECK-NEXT: vmovaps %zmm1, (%rbx) +; 
CHECK-NEXT: vmovaps %zmm1, (%rdi) ; CHECK-NEXT: callq _Print__512 ; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload ; CHECK-NEXT: callq _Print__512 Index: test/CodeGen/X86/avx512-calling-conv.ll =================================================================== --- test/CodeGen/X86/avx512-calling-conv.ll +++ test/CodeGen/X86/avx512-calling-conv.ll @@ -466,7 +466,7 @@ ; KNL_X32-NEXT: movl %edi, (%esp) ; KNL_X32-NEXT: calll _test11 ; KNL_X32-NEXT: movl %eax, %ebx -; KNL_X32-NEXT: movzbl %bl, %eax +; KNL_X32-NEXT: movzbl %al, %eax ; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp) ; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp) ; KNL_X32-NEXT: movl %edi, (%esp) Index: test/CodeGen/X86/buildvec-insertvec.ll =================================================================== --- test/CodeGen/X86/buildvec-insertvec.ll +++ test/CodeGen/X86/buildvec-insertvec.ll @@ -38,7 +38,7 @@ ; SSE2-LABEL: test_negative_zero_1: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movaps %xmm0, %xmm1 -; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: xorps %xmm2, %xmm2 Index: test/CodeGen/X86/combine-fcopysign.ll =================================================================== --- test/CodeGen/X86/combine-fcopysign.ll +++ test/CodeGen/X86/combine-fcopysign.ll @@ -231,8 +231,8 @@ ; SSE-NEXT: cvtss2sd %xmm2, %xmm4 ; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3] ; SSE-NEXT: movaps %xmm2, %xmm6 -; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3] ; SSE-NEXT: movaps {{.*#+}} xmm7 ; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: andps %xmm7, %xmm2 @@ -247,7 +247,7 @@ ; SSE-NEXT: orps %xmm0, %xmm4 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0] ; SSE-NEXT: movaps %xmm1, %xmm0 -; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] ; SSE-NEXT: andps %xmm7, %xmm0 ; SSE-NEXT: cvtss2sd %xmm3, %xmm3 ; SSE-NEXT: andps %xmm8, %xmm3 @@ -294,7 +294,7 @@ ; SSE-NEXT: orps %xmm6, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE-NEXT: movaps %xmm3, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1] ; SSE-NEXT: andps %xmm5, %xmm1 ; SSE-NEXT: xorps %xmm6, %xmm6 ; SSE-NEXT: cvtsd2ss %xmm2, %xmm6 Index: test/CodeGen/X86/complex-fastmath.ll =================================================================== --- test/CodeGen/X86/complex-fastmath.ll +++ test/CodeGen/X86/complex-fastmath.ll @@ -14,7 +14,7 @@ ; SSE: # BB#0: ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: addss %xmm2, %xmm2 +; SSE-NEXT: addss %xmm0, %xmm2 ; SSE-NEXT: mulss %xmm1, %xmm2 ; SSE-NEXT: mulss %xmm0, %xmm0 ; SSE-NEXT: mulss %xmm1, %xmm1 @@ -58,9 +58,9 @@ ; SSE-LABEL: complex_square_f64: ; SSE: # BB#0: ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: addsd %xmm2, %xmm2 +; SSE-NEXT: addsd %xmm0, %xmm2 ; SSE-NEXT: mulsd %xmm1, %xmm2 ; SSE-NEXT: mulsd %xmm0, %xmm0 ; SSE-NEXT: mulsd %xmm1, %xmm1 @@ -161,9 +161,9 @@ ; SSE-LABEL: complex_mul_f64: ; SSE: # BB#0: ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; 
SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: mulsd %xmm0, %xmm4 ; SSE-NEXT: mulsd %xmm1, %xmm0 Index: test/CodeGen/X86/copy-propagation.ll =================================================================== --- test/CodeGen/X86/copy-propagation.ll +++ test/CodeGen/X86/copy-propagation.ll @@ -1,5 +1,6 @@ -; RUN: llc %s -mattr=+avx -o - | FileCheck %s +; RUN: llc %s -mattr=+avx -o - -disable-copyfwd | FileCheck %s ; Originally from http://llvm.org/PR21743. +; Disable MachineCopyForwarding pass since it hides the original bug. target triple = "x86_64-pc-win32-elf" Index: test/CodeGen/X86/divide-by-constant.ll =================================================================== --- test/CodeGen/X86/divide-by-constant.ll +++ test/CodeGen/X86/divide-by-constant.ll @@ -318,7 +318,7 @@ ; X64: # BB#0: # %entry ; X64-NEXT: movq %rdi, %rcx ; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F -; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %rdx ; X64-NEXT: shrq $12, %rdx ; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039 Index: test/CodeGen/X86/fmaxnum.ll =================================================================== --- test/CodeGen/X86/fmaxnum.ll +++ test/CodeGen/X86/fmaxnum.ll @@ -18,7 +18,7 @@ ; CHECK-LABEL: @test_fmaxf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: maxss %xmm0, %xmm1 @@ -47,7 +47,7 @@ ; CHECK-LABEL: @test_fmax ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: maxsd %xmm0, %xmm1 @@ -74,7 +74,7 @@ ; CHECK-LABEL: @test_intrinsic_fmaxf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: maxss %xmm0, %xmm1 @@ -95,7 +95,7 @@ ; CHECK-LABEL: @test_intrinsic_fmax ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: maxsd %xmm0, %xmm1 Index: test/CodeGen/X86/fminnum.ll =================================================================== --- test/CodeGen/X86/fminnum.ll +++ test/CodeGen/X86/fminnum.ll @@ -18,7 +18,7 @@ ; CHECK-LABEL: @test_fminf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: minss %xmm0, %xmm1 @@ -40,7 +40,7 @@ ; CHECK-LABEL: @test_fmin ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: minsd %xmm0, %xmm1 @@ -67,7 +67,7 @@ ; CHECK-LABEL: @test_intrinsic_fminf ; SSE: movaps %xmm0, %xmm2 -; SSE-NEXT: cmpunordss %xmm2, %xmm2 +; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: minss %xmm0, %xmm1 @@ -87,7 +87,7 @@ ; CHECK-LABEL: @test_intrinsic_fmin ; SSE: movapd %xmm0, %xmm2 -; SSE-NEXT: cmpunordsd %xmm2, %xmm2 +; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: minsd %xmm0, %xmm1 Index: 
test/CodeGen/X86/fp128-i128.ll =================================================================== --- test/CodeGen/X86/fp128-i128.ll +++ test/CodeGen/X86/fp128-i128.ll @@ -198,7 +198,7 @@ ret fp128 %add ; CHECK-LABEL: TestI128_4: ; CHECK: movaps %xmm0, %xmm1 -; CHECK-NEXT: movaps %xmm1, 16(%rsp) +; CHECK-NEXT: movaps %xmm0, 16(%rsp) ; CHECK-NEXT: movq 24(%rsp), %rax ; CHECK-NEXT: movq %rax, 8(%rsp) ; CHECK-NEXT: movq $0, (%rsp) @@ -242,7 +242,7 @@ ret fp128 %add ; CHECK-LABEL: acosl: ; CHECK: movaps %xmm0, %xmm1 -; CHECK-NEXT: movaps %xmm1, 16(%rsp) +; CHECK-NEXT: movaps %xmm0, 16(%rsp) ; CHECK-NEXT: movq 24(%rsp), %rax ; CHECK-NEXT: movq %rax, 8(%rsp) ; CHECK-NEXT: movq $0, (%rsp) Index: test/CodeGen/X86/haddsub-2.ll =================================================================== --- test/CodeGen/X86/haddsub-2.ll +++ test/CodeGen/X86/haddsub-2.ll @@ -908,16 +908,16 @@ ; SSE-LABEL: not_a_hsub_2: ; SSE: # BB#0: ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: subss %xmm3, %xmm2 ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3] ; SSE-NEXT: subss %xmm3, %xmm0 ; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3] ; SSE-NEXT: movaps %xmm1, %xmm4 -; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1] ; SSE-NEXT: subss %xmm4, %xmm3 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3] @@ -965,10 +965,10 @@ ; SSE-LABEL: not_a_hsub_3: ; SSE: # BB#0: ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1] ; SSE-NEXT: subsd %xmm2, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: subsd %xmm0, %xmm2 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movapd %xmm2, %xmm0 Index: test/CodeGen/X86/haddsub-undef.ll =================================================================== --- test/CodeGen/X86/haddsub-undef.ll +++ test/CodeGen/X86/haddsub-undef.ll @@ -103,7 +103,7 @@ ; SSE-LABEL: test5_undef: ; SSE: # BB#0: ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE-NEXT: addsd %xmm0, %xmm1 ; SSE-NEXT: movapd %xmm1, %xmm0 ; SSE-NEXT: retq @@ -168,7 +168,7 @@ ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSE-NEXT: addss %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] ; SSE-NEXT: addss %xmm2, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] Index: test/CodeGen/X86/inline-asm-fpstack.ll =================================================================== --- test/CodeGen/X86/inline-asm-fpstack.ll +++ test/CodeGen/X86/inline-asm-fpstack.ll @@ -130,7 +130,8 @@ ; CHECK: testPR4459 ; CHECK: ceil ; CHECK: fld %st(0) -; CHECK-NOT: fxch +; FIXME: This fxch is redundant. 
+; CHECK: fxch %st(1) ; CHECK: fistpl ; CHECK-NOT: fxch ; CHECK: fstpt Index: test/CodeGen/X86/ipra-local-linkage.ll =================================================================== --- test/CodeGen/X86/ipra-local-linkage.ll +++ test/CodeGen/X86/ipra-local-linkage.ll @@ -24,7 +24,7 @@ call void @foo() ; CHECK-LABEL: bar: ; CHECK: callq foo - ; CHECK-NEXT: movl %eax, %r15d + ; CHECK-NEXT: movl %edi, %r15d call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X) ret void } Index: test/CodeGen/X86/localescape.ll =================================================================== --- test/CodeGen/X86/localescape.ll +++ test/CodeGen/X86/localescape.ll @@ -27,7 +27,7 @@ ; X64-LABEL: print_framealloc_from_fp: ; X64: movq %rcx, %[[parent_fp:[a-z]+]] -; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx +; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx ; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]] ; X64: movq %[[str]], %rcx ; X64: callq printf Index: test/CodeGen/X86/mul-i1024.ll =================================================================== --- test/CodeGen/X86/mul-i1024.ll +++ test/CodeGen/X86/mul-i1024.ll @@ -159,7 +159,7 @@ ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %esi ; X32-NEXT: movl %esi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: pushl %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -752,7 +752,7 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi ; X32-NEXT: movl %ebx, %esi -; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -898,7 +898,6 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: movl %edi, %ebx ; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 @@ -910,7 +909,7 @@ ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 @@ -1365,7 +1364,7 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: movl %edi, %ebx -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 @@ -2472,7 +2471,7 @@ ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -4423,21 +4422,19 @@ ; X64-NEXT: movq %r11, %r13 ; X64-NEXT: addq %rax, %r13 ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r15 -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: addq %rbp, %r13 ; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rbx, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rsi, %r8 -; X64-NEXT: movq (%r8), %rax +; X64-NEXT: movq (%rsi), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: xorl %ebp, %ebp ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rax, %r14 ; X64-NEXT: movq %rdx, %rcx -; 
X64-NEXT: movq 8(%r8), %rax +; X64-NEXT: movq 8(%rsi), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rbp ; X64-NEXT: xorl %r9d, %r9d @@ -4454,8 +4451,9 @@ ; X64-NEXT: andl $1, %ebp ; X64-NEXT: addq %rax, %rbx ; X64-NEXT: adcq %rdx, %rbp -; X64-NEXT: movq 16(%r8), %rax -; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 16(%rsi), %rax +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill @@ -4470,6 +4468,7 @@ ; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %r14, %rax +; X64-NEXT: movq %rdi, %r15 ; X64-NEXT: adcq %rcx, %r15 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq (%r10), %rax @@ -4480,7 +4479,7 @@ ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %r14, %rax ; X64-NEXT: movq %r14, %rdi -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rdx, %rax ; X64-NEXT: adcq %rcx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq 32(%r8), %rax @@ -4498,7 +4497,7 @@ ; X64-NEXT: addq %rdi, %r11 ; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdi, %r11 -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %r12, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill @@ -4543,7 +4542,7 @@ ; X64-NEXT: addq %r11, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r15, %rcx -; X64-NEXT: adcq %rcx, %r12 +; X64-NEXT: adcq %r15, %r12 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rbx, %rdi ; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill @@ -4645,7 +4644,7 @@ ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq %r10, %rsi -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %r10 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload @@ -4678,8 +4677,7 @@ ; X64-NEXT: adcq %r8, %r13 ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r12 -; X64-NEXT: movq %r10, %rbp -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %r10, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rcx @@ -4693,7 +4691,7 @@ ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq 24(%rax), %r14 -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %r10, %rax ; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rdi, %rax @@ -4842,13 +4840,13 @@ ; X64-NEXT: adcq $0, %r8 ; X64-NEXT: adcq $0, %r10 ; X64-NEXT: movq %r9, %rsi -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %r9, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rax, %r11 ; X64-NEXT: movq %rbp, %r14 -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdi, %r13 ; X64-NEXT: movq %rdx, %rdi @@ -4976,7 +4974,7 @@ ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r11, %rdi -; X64-NEXT: mulq %rdi +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload @@ -5009,7 
+5007,7 @@ ; X64-NEXT: adcq $0, %r9 ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: movq %r8, %rbp -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %r8, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r14 @@ -5197,7 +5195,7 @@ ; X64-NEXT: adcq $0, %r8 ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r9, %rdi -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %r9, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r8 @@ -5336,9 +5334,9 @@ ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload -; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: addq %rax, %r15 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload -; X64-NEXT: adcq %r11, %r12 +; X64-NEXT: adcq %rdx, %r12 ; X64-NEXT: addq %rbp, %r15 ; X64-NEXT: adcq %rsi, %r12 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload @@ -5366,7 +5364,6 @@ ; X64-NEXT: sbbq %rsi, %rsi ; X64-NEXT: andl $1, %esi ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r9 ; X64-NEXT: mulq %r8 ; X64-NEXT: addq %rcx, %rax ; X64-NEXT: adcq %rsi, %rdx @@ -5380,11 +5377,12 @@ ; X64-NEXT: adcq $0, %r12 ; X64-NEXT: movq 80(%r13), %rbp ; X64-NEXT: movq %r14, %rsi -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %r14, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: movq %rax, %r14 -; X64-NEXT: movq %r9, %rax +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %r10 ; X64-NEXT: movq %rax, %rcx @@ -5412,7 +5410,7 @@ ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %rdi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: addq %rax, %rsi ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: addq %rcx, %rsi @@ -5632,10 +5630,10 @@ ; X64-NEXT: addq %rcx, %r12 ; X64-NEXT: adcq %rsi, %r8 ; X64-NEXT: movq %r11, %rsi -; X64-NEXT: movq 64(%rsi), %r11 +; X64-NEXT: movq 64(%r11), %r11 ; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq %rbx, %rdi -; X64-NEXT: mulq %rdi +; X64-NEXT: mulq %rbx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq 72(%rsi), %rbx @@ -5661,13 +5659,13 @@ ; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq %rdi, %rsi ; X64-NEXT: movq %r11, %rdi -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %r13 ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %r13 -; X64-NEXT: addq %r13, %r10 -; X64-NEXT: adcq %r11, %r9 +; X64-NEXT: addq %rax, %r10 +; X64-NEXT: adcq %rdx, %r9 ; X64-NEXT: addq %rbp, %r10 ; X64-NEXT: adcq %rsi, %r9 ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload Index: test/CodeGen/X86/mul-i512.ll =================================================================== --- test/CodeGen/X86/mul-i512.ll +++ test/CodeGen/X86/mul-i512.ll @@ -918,7 +918,7 @@ ; X64-NEXT: movq 8(%rsi), %r8 ; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: mulq %rsi +; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rdx, %r10 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbp, %rax @@ -947,18 +947,18 @@ ; X64-NEXT: adcq %rbx, %rsi ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: movq %r9, %rbx -; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r9, 
-{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rax, %r10 ; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %r10, %r15 +; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: adcq %r13, %r9 ; X64-NEXT: addq %rbp, %r15 ; X64-NEXT: adcq %rsi, %r9 @@ -993,13 +993,13 @@ ; X64-NEXT: addq %rbx, %rbp ; X64-NEXT: adcq %rdi, %rsi ; X64-NEXT: movq %r14, %rcx -; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r14, %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %r11, %r10 -; X64-NEXT: adcq %r14, %r13 +; X64-NEXT: addq %rax, %r10 +; X64-NEXT: adcq %rdx, %r13 ; X64-NEXT: addq %rbp, %r10 ; X64-NEXT: adcq %rsi, %r13 ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload @@ -1011,7 +1011,7 @@ ; X64-NEXT: movq 16(%rsi), %r8 ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %r12 @@ -1043,7 +1043,7 @@ ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rbp, %r11 +; X64-NEXT: addq %rax, %r11 ; X64-NEXT: adcq %rdx, %r14 ; X64-NEXT: addq %r9, %r11 ; X64-NEXT: adcq %rbx, %r14 Index: test/CodeGen/X86/mul128.ll =================================================================== --- test/CodeGen/X86/mul128.ll +++ test/CodeGen/X86/mul128.ll @@ -7,7 +7,7 @@ ; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: imulq %rdi, %rcx ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: mulq %rdx ; X64-NEXT: addq %rcx, %rdx ; X64-NEXT: imulq %r8, %rsi ; X64-NEXT: addq %rsi, %rdx Index: test/CodeGen/X86/pmul.ll =================================================================== --- test/CodeGen/X86/pmul.ll +++ test/CodeGen/X86/pmul.ll @@ -9,7 +9,7 @@ ; SSE2-LABEL: mul_v16i8c: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm2, %xmm1 @@ -143,10 +143,10 @@ ; SSE2-LABEL: mul_v16i8: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm3 ; SSE2-NEXT: pmullw %xmm2, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = 
[255,255,255,255,255,255,255,255] @@ -386,7 +386,7 @@ ; SSE2-LABEL: mul_v32i8c: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm3, %xmm2 @@ -398,7 +398,7 @@ ; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: packuswb %xmm2, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm4, %xmm2 @@ -567,10 +567,10 @@ ; SSE2-LABEL: mul_v32i8: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] @@ -583,10 +583,10 @@ ; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: packuswb %xmm5, %xmm0 ; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm2, %xmm5 ; SSE2-NEXT: pand %xmm4, %xmm5 @@ -774,7 +774,7 @@ ; SSE2-LABEL: mul_v64i8c: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm4, %xmm6 @@ -786,7 +786,7 @@ ; SSE2-NEXT: pand %xmm5, %xmm0 ; SSE2-NEXT: packuswb %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = 
xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
 ; SSE2-NEXT: psraw $8, %xmm6
 ; SSE2-NEXT: pmullw %xmm4, %xmm6
 ; SSE2-NEXT: pand %xmm5, %xmm6
@@ -796,7 +796,7 @@
 ; SSE2-NEXT: pand %xmm5, %xmm1
 ; SSE2-NEXT: packuswb %xmm6, %xmm1
 ; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
 ; SSE2-NEXT: psraw $8, %xmm6
 ; SSE2-NEXT: pmullw %xmm4, %xmm6
 ; SSE2-NEXT: pand %xmm5, %xmm6
@@ -806,7 +806,7 @@
 ; SSE2-NEXT: pand %xmm5, %xmm2
 ; SSE2-NEXT: packuswb %xmm6, %xmm2
 ; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
 ; SSE2-NEXT: psraw $8, %xmm6
 ; SSE2-NEXT: pmullw %xmm4, %xmm6
 ; SSE2-NEXT: pand %xmm5, %xmm6
@@ -821,7 +821,7 @@
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa %xmm1, %xmm4
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
 ; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
 ; SSE41-NEXT: pmullw %xmm6, %xmm0
 ; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
@@ -939,10 +939,10 @@
 ; SSE2-LABEL: mul_v64i8:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
 ; SSE2-NEXT: psraw $8, %xmm8
 ; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
 ; SSE2-NEXT: psraw $8, %xmm9
 ; SSE2-NEXT: pmullw %xmm8, %xmm9
 ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
@@ -955,10 +955,10 @@
 ; SSE2-NEXT: pand %xmm8, %xmm0
 ; SSE2-NEXT: packuswb %xmm9, %xmm0
 ; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
 ; SSE2-NEXT: psraw $8, %xmm9
 ; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
 ; SSE2-NEXT: psraw $8, %xmm4
 ; SSE2-NEXT: pmullw %xmm9, %xmm4
 ; SSE2-NEXT: pand %xmm8, %xmm4
@@ -970,10 +970,10 @@
 ; SSE2-NEXT: pand %xmm8, %xmm1
 ; SSE2-NEXT: packuswb %xmm4, %xmm1
 ; SSE2-NEXT: movdqa %xmm6, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
 ; SSE2-NEXT: psraw $8, %xmm4
 ; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
 ; SSE2-NEXT: psraw $8, %xmm5
 ; SSE2-NEXT: pmullw %xmm4, %xmm5
 ; SSE2-NEXT: pand %xmm8, %xmm5
@@ -985,10 +985,10 @@
 ; SSE2-NEXT: pand %xmm8, %xmm2
 ; SSE2-NEXT: packuswb %xmm5, %xmm2
 ; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
 ; SSE2-NEXT: psraw $8, %xmm4
 ; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
 ; SSE2-NEXT: psraw $8, %xmm5
 ; SSE2-NEXT: pmullw %xmm4, %xmm5
 ; SSE2-NEXT: pand %xmm8, %xmm5
@@ -1006,7 +1006,7 @@
 ; SSE41-NEXT: movdqa %xmm1, %xmm8
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
 ; SSE41-NEXT: pmullw %xmm9, %xmm0
 ; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
 ; SSE41-NEXT: pand %xmm9, %xmm0
Index: test/CodeGen/X86/powi.ll
===================================================================
--- test/CodeGen/X86/powi.ll
+++ test/CodeGen/X86/powi.ll
@@ -5,7 +5,7 @@
 ; CHECK-LABEL: pow_wrapper:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movapd %xmm0, %xmm1
-; CHECK-NEXT: mulsd %xmm1, %xmm1
+; CHECK-NEXT: mulsd %xmm0, %xmm1
 ; CHECK-NEXT: mulsd %xmm1, %xmm0
 ; CHECK-NEXT: mulsd %xmm1, %xmm1
 ; CHECK-NEXT: mulsd %xmm1, %xmm0
Index: test/CodeGen/X86/pr11334.ll
===================================================================
--- test/CodeGen/X86/pr11334.ll
+++ test/CodeGen/X86/pr11334.ll
@@ -25,7 +25,7 @@
 ; SSE-NEXT: cvtps2pd %xmm0, %xmm0
 ; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT: movaps %xmm2, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
 ; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
 ; SSE-NEXT: movaps %xmm2, %xmm0
 ; SSE-NEXT: retq
Index: test/CodeGen/X86/pr29112.ll
===================================================================
--- test/CodeGen/X86/pr29112.ll
+++ test/CodeGen/X86/pr29112.ll
@@ -49,16 +49,16 @@
 ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3]
 ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
 ; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
-; CHECK-NEXT: vmovaps %xmm15, %xmm1
-; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
+; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9
 ; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
-; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
+; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8
 ; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
 ; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0
 ; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT: vmovaps %xmm9, (%rsp)
+; CHECK-NEXT: vmovaps %xmm15, %xmm1
 ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: callq foo
Index: test/CodeGen/X86/shrink-wrap-chkstk.ll
===================================================================
--- test/CodeGen/X86/shrink-wrap-chkstk.ll
+++ test/CodeGen/X86/shrink-wrap-chkstk.ll
@@ -61,7 +61,7 @@
 ; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
 ; CHECK: movl %ecx, %eax
-; CHECK: cmpl %edx, %eax
+; CHECK: cmpl %edx, %ecx
 ; CHECK: jge LBB1_2
 ; CHECK: pushl %eax
 ; CHECK: movl $4092, %eax
Index: test/CodeGen/X86/sqrt-fastmath.ll
===================================================================
--- test/CodeGen/X86/sqrt-fastmath.ll
+++ test/CodeGen/X86/sqrt-fastmath.ll
@@ -132,7 +132,7 @@
 ; SSE: # BB#0:
 ; SSE-NEXT: rsqrtss %xmm0, %xmm1
 ; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: mulss %xmm2, %xmm2
+; SSE-NEXT: mulss %xmm1, %xmm2
 ; SSE-NEXT: mulss %xmm0, %xmm2
 ; SSE-NEXT: addss {{.*}}(%rip), %xmm2
 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
@@ -178,7 +178,7 @@
 ; SSE: # BB#0:
 ; SSE-NEXT: rsqrtps %xmm0, %xmm1
 ; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm2
+; SSE-NEXT: mulps %xmm1, %xmm2
 ; SSE-NEXT: mulps %xmm0, %xmm2
 ; SSE-NEXT: addps {{.*}}(%rip), %xmm2
 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
@@ -228,7 +228,7 @@
 ; SSE-NEXT: rsqrtps %xmm0, %xmm3
 ; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
 ; SSE-NEXT: movaps %xmm3, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm2
+; SSE-NEXT: mulps %xmm3, %xmm2
 ; SSE-NEXT: mulps %xmm0, %xmm2
 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
 ; SSE-NEXT: addps %xmm0, %xmm2
@@ -236,7 +236,7 @@
 ; SSE-NEXT: mulps %xmm3, %xmm2
 ; SSE-NEXT: rsqrtps %xmm1, %xmm5
 ; SSE-NEXT: movaps %xmm5, %xmm3
-; SSE-NEXT: mulps %xmm3, %xmm3
+; SSE-NEXT: mulps %xmm5, %xmm3
 ; SSE-NEXT: mulps %xmm1, %xmm3
 ; SSE-NEXT: addps %xmm0, %xmm3
 ; SSE-NEXT: mulps %xmm4, %xmm3
Index: test/CodeGen/X86/sse1.ll
===================================================================
--- test/CodeGen/X86/sse1.ll
+++ test/CodeGen/X86/sse1.ll
@@ -16,7 +16,7 @@
 ; X32-LABEL: test4:
 ; X32: # BB#0: # %entry
 ; X32-NEXT: movaps %xmm0, %xmm2
-; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
 ; X32-NEXT: addss %xmm1, %xmm0
 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; X32-NEXT: subss %xmm1, %xmm2
@@ -26,7 +26,7 @@
 ; X64-LABEL: test4:
 ; X64: # BB#0: # %entry
 ; X64-NEXT: movaps %xmm0, %xmm2
-; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
 ; X64-NEXT: addss %xmm1, %xmm0
 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; X64-NEXT: subss %xmm1, %xmm2
Index: test/CodeGen/X86/sse3-avx-addsub-2.ll
===================================================================
--- test/CodeGen/X86/sse3-avx-addsub-2.ll
+++ test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -408,9 +408,9 @@
 ; SSE-NEXT: movaps %xmm0, %xmm2
 ; SSE-NEXT: subss %xmm0, %xmm2
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
 ; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
 ; SSE-NEXT: subss %xmm4, %xmm3
 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
 ; SSE-NEXT: addss %xmm0, %xmm4
Index: test/CodeGen/X86/statepoint-live-in.ll
===================================================================
--- test/CodeGen/X86/statepoint-live-in.ll
+++ test/CodeGen/X86/statepoint-live-in.ll
@@ -77,7 +77,7 @@
 ; TODO: We could have reused the previous spill slot at zero additional cost.
 ; CHECK-LABEL: test6
 ; CHECK: movl %edi, %ebx
-; CHECK: movl %ebx, 12(%rsp)
+; CHECK: movl %edi, 12(%rsp)
 ; CHECK-NEXT: callq _baz
 ; CHECK-NEXT: Ltmp
 ; CHECK-NEXT: callq _bar
@@ -96,11 +96,11 @@
 ; CHECK: Ltmp1-_test2
 ; CHECK: .byte 1
 ; CHECK-NEXT: .byte 4
-; CHECK-NEXT: .short 6
+; CHECK-NEXT: .short 5
 ; CHECK-NEXT: .long 0
 ; CHECK: .byte 1
 ; CHECK-NEXT: .byte 4
-; CHECK-NEXT: .short 3
+; CHECK-NEXT: .short 4
 ; CHECK-NEXT: .long 0
 ; CHECK: Ltmp2-_test2
 ; CHECK: .byte 1
Index: test/CodeGen/X86/statepoint-stack-usage.ll
===================================================================
--- test/CodeGen/X86/statepoint-stack-usage.ll
+++ test/CodeGen/X86/statepoint-stack-usage.ll
@@ -61,9 +61,9 @@
 gc "statepoint-example" {
 ; CHECK-LABEL: back_to_back_deopt
 ; The exact stores don't matter, but there need to be three stack slots created
-; CHECK: movl %ebx, 12(%rsp)
-; CHECK: movl %ebp, 8(%rsp)
-; CHECK: movl %r14d, 4(%rsp)
+; CHECK: movl %edi, 12(%rsp)
+; CHECK: movl %esi, 8(%rsp)
+; CHECK: movl %edx, 4(%rsp)
 ; CHECK: callq
 ; CHECK: movl %ebx, 12(%rsp)
 ; CHECK: movl %ebp, 8(%rsp)
Index: test/CodeGen/X86/vec_fp_to_int.ll
===================================================================
--- test/CodeGen/X86/vec_fp_to_int.ll
+++ test/CodeGen/X86/vec_fp_to_int.ll
@@ -1018,12 +1018,12 @@
 ; SSE-NEXT: cvttss2si %xmm0, %rax
 ; SSE-NEXT: movd %rax, %xmm2
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
 ; SSE-NEXT: cvttss2si %xmm1, %rax
 ; SSE-NEXT: movd %rax, %xmm1
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
 ; SSE-NEXT: cvttss2si %xmm1, %rax
 ; SSE-NEXT: movd %rax, %xmm3
 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -1126,12 +1126,12 @@
 ; SSE-NEXT: cvttss2si %xmm0, %rax
 ; SSE-NEXT: movd %rax, %xmm2
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
 ; SSE-NEXT: cvttss2si %xmm1, %rax
 ; SSE-NEXT: movd %rax, %xmm1
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
 ; SSE-NEXT: cvttss2si %xmm1, %rax
 ; SSE-NEXT: movd %rax, %xmm3
 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -1316,11 +1316,11 @@
 ; SSE-LABEL: fptoui_4f32_to_4i32:
 ; SSE: # BB#0:
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
 ; SSE-NEXT: cvttss2si %xmm1, %rax
 ; SSE-NEXT: movd %eax, %xmm1
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
 ; SSE-NEXT: cvttss2si %xmm2, %rax
 ; SSE-NEXT: movd %eax, %xmm2
 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
@@ -1560,7 +1560,7 @@
 ; SSE-NEXT: cvttss2si %xmm0, %rax
 ; SSE-NEXT: movd %eax, %xmm0
 ; SSE-NEXT: movaps %xmm2, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm2[2,3]
 ; SSE-NEXT: cvttss2si %xmm3, %rax
 ; SSE-NEXT: movd %eax, %xmm3
 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
@@ -1572,11 +1572,11 @@
 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
 ; SSE-NEXT: cvttss2si %xmm2, %rax
 ; SSE-NEXT: movd %eax, %xmm2
 ; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[2,3]
 ; SSE-NEXT: cvttss2si %xmm3, %rax
 ; SSE-NEXT: movd %eax, %xmm3
 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
@@ -1687,7 +1687,7 @@
 ; SSE-NEXT: cmovaeq %rcx, %rdx
 ; SSE-NEXT: movd %rdx, %xmm2
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
 ; SSE-NEXT: movaps %xmm3, %xmm4
 ; SSE-NEXT: subss %xmm1, %xmm4
 ; SSE-NEXT: cvttss2si %xmm4, %rcx
@@ -1698,7 +1698,7 @@
 ; SSE-NEXT: movd %rdx, %xmm3
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
 ; SSE-NEXT: movaps %xmm3, %xmm4
 ; SSE-NEXT: subss %xmm1, %xmm4
 ; SSE-NEXT: cvttss2si %xmm4, %rcx
@@ -1865,7 +1865,7 @@
 ; SSE-NEXT: cmovaeq %rcx, %rdx
 ; SSE-NEXT: movd %rdx, %xmm2
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
 ; SSE-NEXT: movaps %xmm3, %xmm4
 ; SSE-NEXT: subss %xmm1, %xmm4
 ; SSE-NEXT: cvttss2si %xmm4, %rcx
@@ -1876,7 +1876,7 @@
 ; SSE-NEXT: movd %rdx, %xmm3
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
 ; SSE-NEXT: movaps %xmm3, %xmm4
 ; SSE-NEXT: subss %xmm1, %xmm4
 ; SSE-NEXT: cvttss2si %xmm4, %rcx
Index: test/CodeGen/X86/vec_int_to_fp.ll
===================================================================
--- test/CodeGen/X86/vec_int_to_fp.ll
+++ test/CodeGen/X86/vec_int_to_fp.ll
@@ -1610,7 +1610,7 @@
 ; SSE-LABEL: uitofp_2i64_to_4f32:
 ; SSE: # BB#0:
 ; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %rax
+; SSE-NEXT: movd %xmm0, %rax
 ; SSE-NEXT: testq %rax, %rax
 ; SSE-NEXT: js .LBB39_1
 ; SSE-NEXT: # BB#2:
Index: test/CodeGen/X86/vec_minmax_sint.ll
===================================================================
--- test/CodeGen/X86/vec_minmax_sint.ll
+++ test/CodeGen/X86/vec_minmax_sint.ll
@@ -437,7 +437,7 @@
 ; SSE42: # BB#0:
 ; SSE42-NEXT: movdqa %xmm0, %xmm2
 ; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE42-NEXT: pxor %xmm3, %xmm0
 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
Index: test/CodeGen/X86/vec_shift4.ll
===================================================================
--- test/CodeGen/X86/vec_shift4.ll
+++ test/CodeGen/X86/vec_shift4.ll
@@ -35,7 +35,7 @@
 ; X32: # BB#0: # %entry
 ; X32-NEXT: movdqa %xmm0, %xmm2
 ; X32-NEXT: psllw $5, %xmm1
-; X32-NEXT: movdqa %xmm2, %xmm3
+; X32-NEXT: movdqa %xmm0, %xmm3
 ; X32-NEXT: psllw $4, %xmm3
 ; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
 ; X32-NEXT: movdqa %xmm1, %xmm0
@@ -47,7 +47,7 @@
 ; X32-NEXT: movdqa %xmm1, %xmm0
 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; X32-NEXT: movdqa %xmm2, %xmm3
-; X32-NEXT: paddb %xmm3, %xmm3
+; X32-NEXT: paddb %xmm2, %xmm3
 ; X32-NEXT: paddb %xmm1, %xmm1
 ; X32-NEXT: movdqa %xmm1, %xmm0
 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@@ -58,7 +58,7 @@
 ; X64: # BB#0: # %entry
 ; X64-NEXT: movdqa %xmm0, %xmm2
 ; X64-NEXT: psllw $5, %xmm1
-; X64-NEXT: movdqa %xmm2, %xmm3
+; X64-NEXT: movdqa %xmm0, %xmm3
 ; X64-NEXT: psllw $4, %xmm3
 ; X64-NEXT: pand {{.*}}(%rip), %xmm3
 ; X64-NEXT: movdqa %xmm1, %xmm0
@@ -70,7 +70,7 @@
 ; X64-NEXT: movdqa %xmm1, %xmm0
 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; X64-NEXT: movdqa %xmm2, %xmm3
-; X64-NEXT: paddb %xmm3, %xmm3
+; X64-NEXT: paddb %xmm2, %xmm3
 ; X64-NEXT: paddb %xmm1, %xmm1
 ; X64-NEXT: movdqa %xmm1, %xmm0
 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
Index: test/CodeGen/X86/vector-blend.ll
===================================================================
--- test/CodeGen/X86/vector-blend.ll
+++ test/CodeGen/X86/vector-blend.ll
@@ -1026,7 +1026,7 @@
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psrad $31, %xmm1
 ; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: psubd %xmm2, %xmm3
+; SSE41-NEXT: psubd %xmm0, %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT: movaps %xmm3, %xmm0
Index: test/CodeGen/X86/vector-idiv-sdiv-128.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-sdiv-128.ll
+++ test/CodeGen/X86/vector-idiv-sdiv-128.ll
@@ -176,13 +176,13 @@
 ; SSE2-LABEL: test_div7_16i8:
 ; SSE2: # BB#0:
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-NEXT: psraw $8, %xmm1
 ; SSE2-NEXT: pmullw %xmm3, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
@@ -482,13 +482,13 @@
 ; SSE2-LABEL: test_rem7_16i8:
 ; SSE2: # BB#0:
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-NEXT: psraw $8, %xmm1
 ; SSE2-NEXT: pmullw %xmm3, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
@@ -504,7 +504,7 @@
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: paddb %xmm2, %xmm1
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
Index: test/CodeGen/X86/vector-idiv-udiv-128.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -481,7 +481,7 @@
 ; SSE2-NEXT: psrlw $2, %xmm1
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
Index: test/CodeGen/X86/vector-rotate-128.ll
===================================================================
--- test/CodeGen/X86/vector-rotate-128.ll
+++ test/CodeGen/X86/vector-rotate-128.ll
@@ -351,7 +351,7 @@
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: paddw %xmm4, %xmm4
+; SSE41-NEXT: paddw %xmm1, %xmm4
 ; SSE41-NEXT: movdqa %xmm3, %xmm6
 ; SSE41-NEXT: psllw $8, %xmm6
 ; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -376,7 +376,7 @@
 ; SSE41-NEXT: psllw $4, %xmm2
 ; SSE41-NEXT: por %xmm0, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
 ; SSE41-NEXT: movdqa %xmm3, %xmm4
 ; SSE41-NEXT: psrlw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm2, %xmm0
@@ -621,10 +621,10 @@
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; SSE41-NEXT: psubb %xmm3, %xmm2
 ; SSE41-NEXT: psllw $5, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
 ; SSE41-NEXT: psllw $4, %xmm5
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm4
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT: movdqa %xmm4, %xmm5
@@ -634,13 +634,13 @@
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: paddb %xmm5, %xmm5
+; SSE41-NEXT: paddb %xmm4, %xmm5
 ; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT: psllw $5, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm5
 ; SSE41-NEXT: psrlw $4, %xmm5
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
@@ -1206,7 +1206,7 @@
 ; SSE41-LABEL: constant_rotate_v16i8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
 ; SSE41-NEXT: psllw $4, %xmm3
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
@@ -1218,7 +1218,7 @@
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
Index: test/CodeGen/X86/vector-sext.ll
===================================================================
--- test/CodeGen/X86/vector-sext.ll
+++ test/CodeGen/X86/vector-sext.ll
@@ -243,7 +243,7 @@
 ; SSSE3-LABEL: sext_16i8_to_8i32:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSSE3-NEXT: psrad $24, %xmm0
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7]
@@ -312,7 +312,7 @@
 ; SSSE3-LABEL: sext_16i8_to_16i32:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSSE3-NEXT: psrad $24, %xmm0
 ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
@@ -443,7 +443,7 @@
 ; SSSE3-LABEL: sext_16i8_to_4i64:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSSE3-NEXT: movdqa %xmm0, %xmm2
 ; SSSE3-NEXT: psrad $31, %xmm2
@@ -499,7 +499,7 @@
 ; SSE2-LABEL: sext_16i8_to_8i64:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: psrad $31, %xmm2
@@ -1112,7 +1112,7 @@
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm4
 ; SSE2-NEXT: psrad $31, %xmm4
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
@@ -1131,7 +1131,7 @@
 ; SSSE3-NEXT: movdqa %xmm1, %xmm2
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: psrad $31, %xmm3
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
 ; SSSE3-NEXT: psrad $31, %xmm4
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
@@ -2172,7 +2172,7 @@
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT: pslld $31, %xmm0
 ; SSE2-NEXT: psrad $31, %xmm0
 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -2221,7 +2221,7 @@
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT: pslld $31, %xmm0
 ; SSSE3-NEXT: psrad $31, %xmm0
 ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -3015,7 +3015,7 @@
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT: psllw $15, %xmm0
 ; SSE2-NEXT: psraw $15, %xmm0
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -3104,7 +3104,7 @@
 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT: psllw $15, %xmm0
 ; SSSE3-NEXT: psraw $15, %xmm0
 ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
Index: test/CodeGen/X86/vector-shift-ashr-128.ll
===================================================================
--- test/CodeGen/X86/vector-shift-ashr-128.ll
+++ test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -274,7 +274,7 @@
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psraw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
Index: test/CodeGen/X86/vector-shift-lshr-128.ll
===================================================================
--- test/CodeGen/X86/vector-shift-lshr-128.ll
+++ test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -245,7 +245,7 @@
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psrlw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -407,7 +407,7 @@
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
 ; SSE41-NEXT: psrlw $4, %xmm3
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -679,7 +679,7 @@
 ; SSE41-NEXT: pshufb %xmm0, %xmm1
 ; SSE41-NEXT: psllw $5, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psrlw $4, %xmm4
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@@ -1101,7 +1101,7 @@
 ; SSE41-LABEL: constant_shift_v16i8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psrlw $4, %xmm2
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
Index: test/CodeGen/X86/vector-shift-shl-128.ll
===================================================================
--- test/CodeGen/X86/vector-shift-shl-128.ll
+++ test/CodeGen/X86/vector-shift-shl-128.ll
@@ -202,7 +202,7 @@
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psllw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -361,7 +361,7 @@
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
 ; SSE41-NEXT: psllw $4, %xmm3
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -373,7 +373,7 @@
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
 ; SSE41-NEXT: paddb %xmm1, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@@ -627,7 +627,7 @@
 ; SSE41-NEXT: pshufb %xmm0, %xmm1
 ; SSE41-NEXT: psllw $5, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psllw $4, %xmm4
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@@ -639,7 +639,7 @@
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm1, %xmm1
+; SSE41-NEXT: paddb %xmm2, %xmm1
 ; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@@ -957,7 +957,7 @@
 ; SSE41-LABEL: constant_shift_v16i8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psllw $4, %xmm2
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
@@ -968,7 +968,7 @@
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
Index: test/CodeGen/X86/vector-shuffle-combining.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-combining.ll
+++ test/CodeGen/X86/vector-shuffle-combining.ll
@@ -2792,7 +2792,7 @@
 ; SSE-LABEL: PR22377:
 ; SSE: # BB#0: # %entry
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; SSE-NEXT: addps %xmm0, %xmm1
 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
Index: test/CodeGen/X86/vector-trunc-math.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-math.ll
+++ test/CodeGen/X86/vector-trunc-math.ll
@@ -5198,7 +5198,7 @@
 ; SSE-LABEL: mul_add_const_v4i64_v4i32:
 ; SSE: # BB#0:
 ; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
Index: test/CodeGen/X86/vector-zext.ll
===================================================================
--- test/CodeGen/X86/vector-zext.ll
+++ test/CodeGen/X86/vector-zext.ll
@@ -246,7 +246,7 @@
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -261,7 +261,7 @@
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -399,7 +399,7 @@
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
 ; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -700,7 +700,7 @@
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@@ -715,7 +715,7 @@
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@@ -1582,7 +1582,7 @@
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; SSE41-NEXT: retq
 ;
@@ -1630,7 +1630,7 @@
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE41-NEXT: retq
 ;
Index: test/CodeGen/X86/vselect-minmax.ll
===================================================================
--- test/CodeGen/X86/vselect-minmax.ll
+++ test/CodeGen/X86/vselect-minmax.ll
@@ -3344,12 +3344,12 @@
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm3, %xmm8
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm12
 ; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -3487,12 +3487,12 @@
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
 ; SSE2-NEXT: movdqa %xmm0, %xmm10
 ; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm0, %xmm3
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4225,12 +4225,12 @@
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm3, %xmm8
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm12
 ; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -4368,12 +4368,12 @@
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
 ; SSE2-NEXT: movdqa %xmm0, %xmm10
 ; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm0, %xmm3
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4890,7 +4890,7 @@
 ; SSE2-LABEL: test122:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5164,7 +5164,7 @@
 ; SSE2-LABEL: test124:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5467,7 +5467,7 @@
 ; SSE2-LABEL: test126:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5795,7 +5795,7 @@
 ; SSE2-LABEL: test128:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -6047,7 +6047,7 @@
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -6190,7 +6190,7 @@
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm0, %xmm9
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -6941,7 +6941,7 @@
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -7084,7 +7084,7 @@
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm0, %xmm9
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -7610,7 +7610,7 @@
 ; SSE2-LABEL: test154:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -7882,7 +7882,7 @@
 ; SSE2-LABEL: test156:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8183,7 +8183,7 @@
 ; SSE2-LABEL: test158:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8509,7 +8509,7 @@
 ; SSE2-LABEL: test160:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -10289,7 +10289,7 @@
 ; SSE4: # BB#0: # %entry
 ; SSE4-NEXT: movdqa %xmm0, %xmm2
 ; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
 ; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE4-NEXT: pxor %xmm3, %xmm0
 ; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -10768,7 +10768,7 @@
 ; SSE4: # BB#0: # %entry
 ; SSE4-NEXT: movdqa %xmm0, %xmm2
 ; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
 ; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE4-NEXT: pxor %xmm3, %xmm0
 ; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
Index: test/CodeGen/X86/widen_conv-3.ll
===================================================================
--- test/CodeGen/X86/widen_conv-3.ll
+++ test/CodeGen/X86/widen_conv-3.ll
@@ -74,7 +74,7 @@
 ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
 ; X86-SSE2-NEXT: movss %xmm0, (%eax)
 ; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
 ; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
Index: test/CodeGen/X86/widen_conv-4.ll
===================================================================
--- test/CodeGen/X86/widen_conv-4.ll
+++ test/CodeGen/X86/widen_conv-4.ll
@@ -19,7 +19,7 @@
 ; X86-SSE2-NEXT: movups %xmm0, (%eax)
 ; X86-SSE2-NEXT: movss %xmm2, 16(%eax)
 ; X86-SSE2-NEXT: movaps %xmm2, %xmm0
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1]
 ; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; X86-SSE2-NEXT: movss %xmm2, 20(%eax)
@@ -100,7 +100,7 @@
 ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
 ; X86-SSE2-NEXT: movss %xmm0, (%eax)
 ; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
 ; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
Index: test/CodeGen/X86/x86-shrink-wrap-unwind.ll
===================================================================
--- test/CodeGen/X86/x86-shrink-wrap-unwind.ll
+++ test/CodeGen/X86/x86-shrink-wrap-unwind.ll
@@ -23,7 +23,7 @@
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
 ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Store %a in the alloca.
@@ -69,7 +69,7 @@
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
 ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Prologue code.
@@ -115,7 +115,7 @@
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
 ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Prologue code.
Index: test/CodeGen/X86/x86-shrink-wrapping.ll
===================================================================
--- test/CodeGen/X86/x86-shrink-wrapping.ll
+++ test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -17,7 +17,7 @@
 ; Compare the arguments and jump to exit.
 ; No prologue needed.
 ; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]]
+; ENABLE-NEXT: cmpl %esi, %edi
 ; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Prologue code.
@@ -27,7 +27,7 @@
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]]
+; DISABLE-NEXT: cmpl %esi, %edi
 ; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Store %a in the alloca.