Index: llvm/lib/Target/X86/X86InstrInfo.cpp =================================================================== --- llvm/lib/Target/X86/X86InstrInfo.cpp +++ llvm/lib/Target/X86/X86InstrInfo.cpp @@ -3648,8 +3648,15 @@ std::optional X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { - if (MI.isMoveReg()) + if (MI.isMoveReg()) { + // FIXME: Dirty hack for apparent invariant that doesn't hold when + // subreg_to_reg is coalesced with ordinary copies, such that the bits that + // were asserted as 0 are now undef. + if (MI.getOperand(0).isUndef() && MI.getOperand(0).getSubReg()) + return std::nullopt; + return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; + } return std::nullopt; } Index: llvm/test/CodeGen/X86/GlobalISel/add-ext.ll =================================================================== --- llvm/test/CodeGen/X86/GlobalISel/add-ext.ll +++ llvm/test/CodeGen/X86/GlobalISel/add-ext.ll @@ -205,8 +205,8 @@ ; CHECK-NEXT: addq %rdi, %rcx ; CHECK-NEXT: movl (%rcx), %ecx ; CHECK-NEXT: addl (%rax), %ecx -; CHECK-NEXT: movl %esi, %esi -; CHECK-NEXT: imulq $4, %rsi, %rax +; CHECK-NEXT: movl %esi, %eax +; CHECK-NEXT: imulq $4, %rax, %rax ; CHECK-NEXT: addq %rdi, %rax ; CHECK-NEXT: movl %ecx, (%rax) ; CHECK-NEXT: retq Index: llvm/test/CodeGen/X86/dagcombine-cse.ll =================================================================== --- llvm/test/CodeGen/X86/dagcombine-cse.ll +++ llvm/test/CodeGen/X86/dagcombine-cse.ll @@ -106,24 +106,24 @@ ; ; X64-LABEL: square_high: ; X64: ## %bb.0: ## %entry -; X64-NEXT: movl %esi, %esi -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %rdi ; X64-NEXT: addq %r8, %rdx -; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rsi, %rax ; X64-NEXT: adcq $0, %rax ; X64-NEXT: addq %rdx, %r8 -; X64-NEXT: adcq %rcx, %rax -; X64-NEXT: imulq %rsi, %rsi -; X64-NEXT: addq %rax, %rsi -; X64-NEXT: shrdq $32, %rsi, %r8 -; X64-NEXT: shrq $32, %rsi +; X64-NEXT: adcq %rsi, %rax +; X64-NEXT: imulq %rcx, %rcx +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: shrdq $32, %rcx, %r8 +; X64-NEXT: shrq $32, %rcx ; X64-NEXT: movq %r8, %rax -; X64-NEXT: movq %rsi, %rdx +; X64-NEXT: movq %rcx, %rdx ; X64-NEXT: retq entry: %conv = zext i96 %x to i192 Index: llvm/test/CodeGen/X86/fold-and-shift-x86_64.ll =================================================================== --- llvm/test/CodeGen/X86/fold-and-shift-x86_64.ll +++ llvm/test/CodeGen/X86/fold-and-shift-x86_64.ll @@ -34,8 +34,8 @@ define i8 @t3(ptr %X, i64 %i) { ; CHECK-LABEL: t3: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movl %esi, %esi -; CHECK-NEXT: movzbl (%rdi,%rsi,4), %eax +; CHECK-NEXT: movl %esi, %eax +; CHECK-NEXT: movzbl (%rdi,%rax,4), %eax ; CHECK-NEXT: retq entry: Index: llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll =================================================================== --- llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll +++ llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll @@ -78,18 +78,18 @@ define i64 @out64_constmask(i64 %x, i64 %y) { ; CHECK-NOBMI-LABEL: out64_constmask: ; CHECK-NOBMI: # %bb.0: -; CHECK-NOBMI-NEXT: movl %edi, %edi +; CHECK-NOBMI-NEXT: movl %edi, %ecx ; CHECK-NOBMI-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000 ; CHECK-NOBMI-NEXT: andq %rsi, %rax -; CHECK-NOBMI-NEXT: orq %rdi, %rax +; CHECK-NOBMI-NEXT: orq %rcx, %rax ; CHECK-NOBMI-NEXT: retq ; ; CHECK-BMI-LABEL: out64_constmask: ; CHECK-BMI: # %bb.0: -; CHECK-BMI-NEXT: movl %edi, %edi +; CHECK-BMI-NEXT: movl %edi, %ecx ; CHECK-BMI-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000 ; CHECK-BMI-NEXT: andq %rsi, %rax -; CHECK-BMI-NEXT: orq %rdi, %rax +; CHECK-BMI-NEXT: orq %rcx, %rax ; CHECK-BMI-NEXT: retq %mx = and i64 %x, 4294967295 %my = and i64 %y, -4294967296