diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -1205,12 +1205,12 @@ &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass; } Register SrcReg = Src.getReg(); + isKill = MI.killsRegister(SrcReg); // For both LEA64 and LEA32 the register already has essentially the right // type (32-bit or 64-bit) we may just need to forbid SP. if (Opc != X86::LEA64_32r) { NewSrc = SrcReg; - isKill = Src.isKill(); assert(!Src.isUndef() && "Undef op doesn't need optimization"); if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC)) @@ -1225,8 +1225,7 @@ ImplicitOp = Src; ImplicitOp.setImplicit(); - NewSrc = getX86SubSuperRegister(Src.getReg(), 64); - isKill = Src.isKill(); + NewSrc = getX86SubSuperRegister(SrcReg, 64); assert(!Src.isUndef() && "Undef op doesn't need optimization"); } else { // Virtual register of the wrong class, we have to create a temporary 64-bit @@ -1235,7 +1234,7 @@ MachineInstr *Copy = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY)) .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) - .add(Src); + .addReg(SrcReg, getKillRegState(isKill)); // Which is obviously going to be dead after we're done with it. isKill = true; @@ -1532,13 +1531,6 @@ else Opc = Is64Bit ? 
X86::LEA64_32r : X86::LEA32r; - bool isKill; - Register SrcReg; - MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, - SrcReg, isKill, ImplicitOp, LV)) - return nullptr; - const MachineOperand &Src2 = MI.getOperand(2); bool isKill2; Register SrcReg2; @@ -1547,6 +1539,20 @@ SrcReg2, isKill2, ImplicitOp2, LV)) return nullptr; + bool isKill; + Register SrcReg; + MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); + if (Src.getReg() == Src2.getReg()) { + // Don't call classifyLEAReg a second time on the same register, in case + // the first call inserted a COPY from Src2 and marked it as killed. + isKill = isKill2; + SrcReg = SrcReg2; + } else { + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, + SrcReg, isKill, ImplicitOp, LV)) + return nullptr; + } + MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest); if (ImplicitOp.getReg() != 0) MIB.add(ImplicitOp); diff --git a/llvm/test/CodeGen/X86/twoaddr-mul2.mir b/llvm/test/CodeGen/X86/twoaddr-mul2.mir --- a/llvm/test/CodeGen/X86/twoaddr-mul2.mir +++ b/llvm/test/CodeGen/X86/twoaddr-mul2.mir @@ -1,7 +1,7 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=x86_64-unknown -mcpu=haswell -run-pass=twoaddressinstruction %s -o - | FileCheck %s +# RUN: llc -mtriple=x86_64-unknown -mcpu=haswell -run-pass=twoaddressinstruction -verify-machineinstrs %s -o - | FileCheck %s -# FIXME: The killed flag should be on the second COPY from [[COPY]], not the first one. +# Check that we don't have any uses of [[COPY]] after it is killed.
--- name: test_mul_by_2 tracksRegLiveness: true @@ -13,9 +13,8 @@ ; CHECK: liveins: $edi ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY killed $edi - ; CHECK-NEXT: undef %2.sub_32bit:gr64 = COPY killed [[COPY]] - ; CHECK-NEXT: undef %3.sub_32bit:gr64_nosp = COPY [[COPY]] - ; CHECK-NEXT: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r killed %2, 1, killed %3, 0, $noreg + ; CHECK-NEXT: undef %2.sub_32bit:gr64_nosp = COPY killed [[COPY]] + ; CHECK-NEXT: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r killed %2, 1, killed %2, 0, $noreg ; CHECK-NEXT: $eax = COPY killed [[LEA64_32r]] ; CHECK-NEXT: RET 0, killed $eax %0:gr32 = COPY killed $edi