Index: lib/Target/X86/X86InstrInfo.h
===================================================================
--- lib/Target/X86/X86InstrInfo.h
+++ lib/Target/X86/X86InstrInfo.h
@@ -258,7 +258,7 @@
   /// operand to the LEA instruction.
   bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                       unsigned LEAOpcode, bool AllowSP, unsigned &NewSrc,
-                      bool &isKill, bool &isUndef, MachineOperand &ImplicitOp,
+                      bool &isKill, MachineOperand &ImplicitOp,
                       LiveVariables *LV) const;
 
   /// convertToThreeAddress - This method must be implemented by targets that
Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -739,8 +739,7 @@
 bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                   unsigned Opc, bool AllowSP, unsigned &NewSrc,
-                                  bool &isKill, bool &isUndef,
-                                  MachineOperand &ImplicitOp,
+                                  bool &isKill, MachineOperand &ImplicitOp,
                                   LiveVariables *LV) const {
   MachineFunction &MF = *MI.getParent()->getParent();
   const TargetRegisterClass *RC;
@@ -757,7 +756,7 @@
   if (Opc != X86::LEA64_32r) {
     NewSrc = SrcReg;
     isKill = Src.isKill();
-    isUndef = Src.isUndef();
+    assert(!Src.isUndef() && "Undef op doesn't need optimization");
 
     if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
         !MF.getRegInfo().constrainRegClass(NewSrc, RC))
@@ -774,7 +773,7 @@
     NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
     isKill = Src.isKill();
-    isUndef = Src.isUndef();
+    assert(!Src.isUndef() && "Undef op doesn't need optimization");
   } else {
     // Virtual register of the wrong class, we have to create a temporary 64-bit
     // vreg to feed into the LEA.
@@ -786,7 +785,6 @@
     // Which is obviously going to be dead after we're done with it.
     isKill = true;
-    isUndef = false;
 
     if (LV)
       LV->replaceKillInstruction(SrcReg, MI, *Copy);
@@ -807,6 +805,7 @@
   unsigned Src = MI.getOperand(1).getReg();
   bool isDead = MI.getOperand(0).isDead();
   bool isKill = MI.getOperand(1).isKill();
+  assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
 
   MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
   unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
@@ -858,6 +857,7 @@
   case X86::ADD16rr_DB: {
     unsigned Src2 = MI.getOperand(2).getReg();
     bool isKill2 = MI.getOperand(2).isKill();
+    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
     unsigned leaInReg2 = 0;
     MachineInstr *InsMI2 = nullptr;
     if (Src == Src2) {
@@ -926,6 +926,16 @@
   const MachineOperand &Dest = MI.getOperand(0);
   const MachineOperand &Src = MI.getOperand(1);
 
+  // Ideally, operations with undef should be folded before we get here, but we
+  // can't guarantee it. Bail out because optimizing undefs is a waste of time.
+  // Without this, we have to forward undef state to new register operands to
+  // avoid machine verifier errors.
+  if (Src.isUndef())
+    return nullptr;
+  if (MI.getNumOperands() > 2)
+    if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
+      return nullptr;
+
   MachineInstr *NewMI = nullptr;
   // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
   // we have better subtarget support, enable the 16-bit LEA generation here.
@@ -964,11 +974,11 @@
     unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
 
     // LEA can't handle ESP.
-    bool isKill, isUndef;
+    bool isKill;
     unsigned SrcReg;
     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
-                        SrcReg, isKill, isUndef, ImplicitOp, LV))
+                        SrcReg, isKill, ImplicitOp, LV))
       return nullptr;
 
     MachineInstrBuilder MIB =
@@ -976,7 +986,7 @@
             .add(Dest)
             .addReg(0)
             .addImm(1ULL << ShAmt)
-            .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
+            .addReg(SrcReg, getKillRegState(isKill))
             .addImm(0)
             .addReg(0);
     if (ImplicitOp.getReg() != 0)
@@ -1007,18 +1017,17 @@
     assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
     unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
                                         : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
-    bool isKill, isUndef;
+    bool isKill;
     unsigned SrcReg;
     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
-    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
-                        SrcReg, isKill, isUndef, ImplicitOp, LV))
+    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
+                        ImplicitOp, LV))
       return nullptr;
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
             .add(Dest)
-            .addReg(SrcReg,
-                    getKillRegState(isKill) | getUndefRegState(isUndef));
+            .addReg(SrcReg, getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
       MIB.add(ImplicitOp);
@@ -1039,17 +1048,16 @@
     unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
                                         : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
-    bool isKill, isUndef;
+    bool isKill;
     unsigned SrcReg;
     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
-    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
-                        SrcReg, isKill, isUndef, ImplicitOp, LV))
+    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
+                        ImplicitOp, LV))
       return nullptr;
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
             .add(Dest)
-            .addReg(SrcReg, getUndefRegState(isUndef) |
-                            getKillRegState(isKill));
+            .addReg(SrcReg, getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
       MIB.add(ImplicitOp);
@@ -1076,19 +1084,19 @@
     else
       Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
 
-    bool isKill, isUndef;
+    bool isKill;
     unsigned SrcReg;
     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
-                        SrcReg, isKill, isUndef, ImplicitOp, LV))
+                        SrcReg, isKill, ImplicitOp, LV))
       return nullptr;
 
     const MachineOperand &Src2 = MI.getOperand(2);
-    bool isKill2, isUndef2;
+    bool isKill2;
     unsigned SrcReg2;
     MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
     if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
-                        SrcReg2, isKill2, isUndef2, ImplicitOp2, LV))
+                        SrcReg2, isKill2, ImplicitOp2, LV))
       return nullptr;
 
     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
@@ -1098,11 +1106,6 @@
       MIB.add(ImplicitOp2);
 
     NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
-
-    // Preserve undefness of the operands.
-    NewMI->getOperand(1).setIsUndef(isUndef);
-    NewMI->getOperand(3).setIsUndef(isUndef2);
-
     if (LV && Src2.isKill())
       LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
     break;
@@ -1118,11 +1121,8 @@
     NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest),
                       Src.getReg(), Src.isKill(), Src2, isKill2);
 
-    // Preserve undefness of the operands.
-    bool isUndef = MI.getOperand(1).isUndef();
-    bool isUndef2 = MI.getOperand(2).isUndef();
-    NewMI->getOperand(1).setIsUndef(isUndef);
-    NewMI->getOperand(3).setIsUndef(isUndef2);
+    assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
+    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
 
     if (LV && isKill2)
       LV->replaceKillInstruction(Src2, MI, *NewMI);
@@ -1144,17 +1144,16 @@
     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
     unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
 
-    bool isKill, isUndef;
+    bool isKill;
     unsigned SrcReg;
     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
-                        SrcReg, isKill, isUndef, ImplicitOp, LV))
+                        SrcReg, isKill, ImplicitOp, LV))
       return nullptr;
 
     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                   .add(Dest)
-                                  .addReg(SrcReg, getUndefRegState(isUndef) |
-                                                  getKillRegState(isKill));
+                                  .addReg(SrcReg, getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
       MIB.add(ImplicitOp);
Index: test/CodeGen/X86/GlobalISel/undef.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/undef.ll
+++ test/CodeGen/X86/GlobalISel/undef.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
 
 define i8 @test() {
 ; ALL-LABEL: test:
@@ -8,8 +8,8 @@
   ret i8 undef
 }
 
-define i8 @test2(i8 %a) {
-; ALL-LABEL: test2:
+define i8 @add_undef_i8(i8 %a) {
+; ALL-LABEL: add_undef_i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    movl %edi, %eax
 ; ALL-NEXT:    addb %al, %al
@@ -19,6 +19,35 @@
   ret i8 %r
 }
 
+define i16 @add_undef_i16(i16 %a) {
+; ALL-LABEL: add_undef_i16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movl %edi, %eax
+; ALL-NEXT:    addw %ax, %ax
+; ALL-NEXT:    # kill: def $ax killed $ax killed $eax
+; ALL-NEXT:    retq
+  %r = add i16 %a, undef
+  ret i16 %r
+}
+
+define i16 @add_undef_i16_commute(i16 %a) {
+; ALL-LABEL: add_undef_i16_commute:
+; ALL:       # %bb.0:
+; ALL-NEXT:    addw %di, %ax
+; ALL-NEXT:    retq
+  %r = add i16 undef, %a
+  ret i16 %r
+}
+
+define i32 @add_undef_i32(i32 %a) {
+; ALL-LABEL: add_undef_i32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movl %edi, %eax
+; ALL-NEXT:    addl %eax, %eax
+; ALL-NEXT:    retq
+  %r = add i32 %a, undef
+  ret i32 %r
+}
 define float @test3() {
 ; ALL-LABEL: test3:
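
Reviewer note, not part of the patch: the early bail-out added at the top of X86InstrInfo::convertToThreeAddress (the @@ -926 hunk) can be read as the standalone sketch below. The helper name hasUndefSourceOperand is hypothetical and only restates the check that the hunk open-codes on Src and operand 2; convertToThreeAddress returning nullptr simply tells the two-address pass to keep the original instruction rather than building an LEA.

static bool hasUndefSourceOperand(const MachineInstr &MI) {
  // Operand 0 is the def. If either use operand is undef, skip the
  // ADD/INC/DEC/SHL -> LEA conversion instead of propagating undef
  // flags onto the operands of the newly built LEA.
  if (MI.getOperand(1).isUndef())
    return true;
  if (MI.getNumOperands() > 2) {
    const MachineOperand &Op2 = MI.getOperand(2);
    if (Op2.isReg() && Op2.isUndef())
      return true;
  }
  return false;
}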