Index: lib/Target/X86/MCTargetDesc/X86BaseInfo.h =================================================================== --- lib/Target/X86/MCTargetDesc/X86BaseInfo.h +++ lib/Target/X86/MCTargetDesc/X86BaseInfo.h @@ -351,10 +351,16 @@ /// MRMSrcMemCC = 36, + /// MRMXmCC - This form is used for instructions that use the Mod/RM byte + /// to specify a memory source, but doesn't use the middle field. And has + /// a condition code. + /// + MRMXmCC = 38, + /// MRMXm - This form is used for instructions that use the Mod/RM byte /// to specify a memory source, but doesn't use the middle field. /// - MRMXm = 39, // Instruction that uses Mod/RM but not the middle field. + MRMXm = 39, // Next, instructions that operate on a memory r/m operand... MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43, // Format /0 /1 /2 /3 @@ -385,10 +391,16 @@ /// MRMSrcRegCC = 52, + /// MRMXrCC - This form is used for instructions that use the Mod/RM byte + /// to specify a register source, but doesn't use the middle field. And has + /// a condition code. + /// + MRMXrCC = 54, + /// MRMXr - This form is used for instructions that use the Mod/RM byte /// to specify a register source, but doesn't use the middle field. /// - MRMXr = 55, // Instruction that uses Mod/RM but not the middle field. + MRMXr = 55, // Instructions that operate on a register r/m operand... MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59, // Format /0 /1 /2 /3 @@ -779,12 +791,14 @@ case X86II::MRMSrcReg4VOp3: case X86II::MRMSrcRegOp4: case X86II::MRMSrcRegCC: + case X86II::MRMXrCC: case X86II::MRMXr: case X86II::MRM0r: case X86II::MRM1r: case X86II::MRM2r: case X86II::MRM3r: case X86II::MRM4r: case X86II::MRM5r: case X86II::MRM6r: case X86II::MRM7r: return -1; + case X86II::MRMXmCC: case X86II::MRMXm: case X86II::MRM0m: case X86II::MRM1m: case X86II::MRM2m: case X86II::MRM3m: Index: lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp =================================================================== --- lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -1081,7 +1081,7 @@ CurOp += X86::AddrNumOperands; REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R break; - case X86II::MRMXm: + case X86II::MRMXmCC: case X86II::MRMXm: case X86II::MRM0m: case X86II::MRM1m: case X86II::MRM2m: case X86II::MRM3m: case X86II::MRM4m: case X86II::MRM5m: @@ -1089,7 +1089,7 @@ REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X break; - case X86II::MRMXr: + case X86II::MRMXrCC: case X86II::MRMXr: case X86II::MRM0r: case X86II::MRM1r: case X86II::MRM2r: case X86II::MRM3r: case X86II::MRM4r: case X86II::MRM5r: @@ -1506,6 +1506,15 @@ break; } + case X86II::MRMXrCC: { + unsigned RegOp = CurOp++; + + unsigned CC = MI.getOperand(CurOp++).getImm(); + EmitByte(BaseOpcode + CC, CurByte, OS); + EmitRegModRMByte(MI.getOperand(RegOp), 0, CurByte, OS); + break; + } + case X86II::MRMXr: case X86II::MRM0r: case X86II::MRM1r: case X86II::MRM2r: case X86II::MRM3r: @@ -1521,6 +1530,17 @@ CurByte, OS); break; + case X86II::MRMXmCC: { + unsigned FirstMemOp = CurOp; + CurOp = FirstMemOp + X86::AddrNumOperands; + + unsigned CC = MI.getOperand(CurOp++).getImm(); + EmitByte(BaseOpcode + CC, CurByte, OS); + + emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Rex, CurByte, OS, Fixups, STI); + break; + } + case X86II::MRMXm: case X86II::MRM0m: case X86II::MRM1m: case X86II::MRM2m: case X86II::MRM3m: Index: lib/Target/X86/X86FastISel.cpp
=================================================================== --- lib/Target/X86/X86FastISel.cpp +++ lib/Target/X86/X86FastISel.cpp @@ -1480,8 +1480,8 @@ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction. static const uint16_t SETFOpcTable[2][3] = { - { X86::SETEr, X86::SETNPr, X86::AND8rr }, - { X86::SETNEr, X86::SETPr, X86::OR8rr } + { X86::COND_E, X86::COND_NP, X86::AND8rr }, + { X86::COND_NE, X86::COND_P, X86::OR8rr } }; const uint16_t *SETFOpc = nullptr; switch (Predicate) { @@ -1497,10 +1497,10 @@ unsigned FlagReg1 = createResultReg(&X86::GR8RegClass); unsigned FlagReg2 = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]), - FlagReg1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]), - FlagReg2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + FlagReg1).addImm(SETFOpc[0]); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + FlagReg2).addImm(SETFOpc[1]); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]), ResultReg).addReg(FlagReg1).addReg(FlagReg2); updateValueMap(I, ResultReg); @@ -1511,7 +1511,6 @@ bool SwapArgs; std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate); assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); - unsigned Opc = X86::getSETFromCond(CC); if (SwapArgs) std::swap(LHS, RHS); @@ -1520,7 +1519,8 @@ if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + ResultReg).addImm(CC); updateValueMap(I, ResultReg); return true; } @@ -2047,8 +2047,8 @@ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction. 
static const uint16_t SETFOpcTable[2][3] = { - { X86::SETNPr, X86::SETEr , X86::TEST8rr }, - { X86::SETPr, X86::SETNEr, X86::OR8rr } + { X86::COND_NP, X86::COND_E, X86::TEST8rr }, + { X86::COND_P, X86::COND_NE, X86::OR8rr } }; const uint16_t *SETFOpc = nullptr; switch (Predicate) { @@ -2080,10 +2080,10 @@ if (SETFOpc) { unsigned FlagReg1 = createResultReg(&X86::GR8RegClass); unsigned FlagReg2 = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]), - FlagReg1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]), - FlagReg2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + FlagReg1).addImm(SETFOpc[0]); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + FlagReg2).addImm(SETFOpc[1]); auto const &II = TII.get(SETFOpc[2]); if (II.getNumDefs()) { unsigned TmpReg = createResultReg(&X86::GR8RegClass); @@ -2897,21 +2897,21 @@ isCommutativeIntrinsic(II)) std::swap(LHS, RHS); - unsigned BaseOpc, CondOpc; + unsigned BaseOpc, CondCode; switch (II->getIntrinsicID()) { default: llvm_unreachable("Unexpected intrinsic!"); case Intrinsic::sadd_with_overflow: - BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break; + BaseOpc = ISD::ADD; CondCode = X86::COND_O; break; case Intrinsic::uadd_with_overflow: - BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break; + BaseOpc = ISD::ADD; CondCode = X86::COND_B; break; case Intrinsic::ssub_with_overflow: - BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break; + BaseOpc = ISD::SUB; CondCode = X86::COND_O; break; case Intrinsic::usub_with_overflow: - BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break; + BaseOpc = ISD::SUB; CondCode = X86::COND_B; break; case Intrinsic::smul_with_overflow: - BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break; + BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break; case Intrinsic::umul_with_overflow: - BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break; + BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break; } unsigned LHSReg = getRegForValue(LHS); @@ -2928,7 +2928,7 @@ }; if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) && - CondOpc == X86::SETOr) { + CondCode == X86::COND_O) { // We can use INC/DEC. ResultReg = createResultReg(TLI.getRegClassFor(VT)); bool IsDec = BaseOpc == ISD::SUB; @@ -2987,8 +2987,8 @@ // Assign to a GPR since the overflow return value is lowered to a SETcc. 
unsigned ResultReg2 = createResultReg(&X86::GR8RegClass); assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers."); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc), - ResultReg2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + ResultReg2).addImm(CondCode); updateValueMap(II, ResultReg, 2); return true; Index: lib/Target/X86/X86FixupLEAs.cpp =================================================================== --- lib/Target/X86/X86FixupLEAs.cpp +++ lib/Target/X86/X86FixupLEAs.cpp @@ -153,6 +153,12 @@ MFI->insert(MBBI, NewMI); // Insert the new inst return NewMI; } + } + + if (!MI.isConvertibleTo3Addr()) + return nullptr; + + switch (MI.getOpcode()) { case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD64ri32_DB: Index: lib/Target/X86/X86FixupSetCC.cpp =================================================================== --- lib/Target/X86/X86FixupSetCC.cpp +++ lib/Target/X86/X86FixupSetCC.cpp @@ -67,30 +67,6 @@ FunctionPass *llvm::createX86FixupSetCC() { return new X86FixupSetCCPass(); } -bool X86FixupSetCCPass::isSetCCr(unsigned Opcode) { - switch (Opcode) { - default: - return false; - case X86::SETOr: - case X86::SETNOr: - case X86::SETBr: - case X86::SETAEr: - case X86::SETEr: - case X86::SETNEr: - case X86::SETBEr: - case X86::SETAr: - case X86::SETSr: - case X86::SETNSr: - case X86::SETPr: - case X86::SETNPr: - case X86::SETLr: - case X86::SETGEr: - case X86::SETLEr: - case X86::SETGr: - return true; - } -} - // We expect the instruction *immediately* before the setcc to imp-def // EFLAGS (because of scheduling glue). To make this less brittle w.r.t // scheduling, look backwards until we hit the beginning of the @@ -128,7 +104,7 @@ // Find a setcc that is used by a zext. // This doesn't have to be the only use, the transformation is safe // regardless. - if (!isSetCCr(MI.getOpcode())) + if (MI.getOpcode() != X86::SETCCr) continue; MachineInstr *ZExt = nullptr; Index: lib/Target/X86/X86FlagsCopyLowering.cpp =================================================================== --- lib/Target/X86/X86FlagsCopyLowering.cpp +++ lib/Target/X86/X86FlagsCopyLowering.cpp @@ -601,8 +601,7 @@ // Otherwise we can just rewrite in-place. if (X86::getCondFromCMov(MI) != X86::COND_INVALID) { rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs); - } else if (X86::getCondFromSETOpc(MI.getOpcode()) != - X86::COND_INVALID) { + } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) { rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs); } else if (MI.getOpcode() == TargetOpcode::COPY) { rewriteCopy(MI, *FlagUse, CopyDefI); @@ -729,7 +728,7 @@ // Scan backwards across the range of instructions with live EFLAGS. 
for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) { - X86::CondCode Cond = X86::getCondFromSETOpc(MI.getOpcode()); + X86::CondCode Cond = X86::getCondFromSETCC(MI); if (Cond != X86::COND_INVALID && !MI.mayStore() && MI.getOperand(0).isReg() && TRI->isVirtualRegister(MI.getOperand(0).getReg())) { assert(MI.getOperand(0).isDef() && @@ -750,7 +749,7 @@ DebugLoc TestLoc, X86::CondCode Cond) { unsigned Reg = MRI->createVirtualRegister(PromoteRC); auto SetI = BuildMI(TestMBB, TestPos, TestLoc, - TII->get(X86::getSETFromCond(Cond)), Reg); + TII->get(X86::SETCCr), Reg).addImm(Cond); (void)SetI; LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump()); ++NumSetCCsInserted; @@ -1023,7 +1022,7 @@ MachineInstr &SetCCI, MachineOperand &FlagUse, CondRegArray &CondRegs) { - X86::CondCode Cond = X86::getCondFromSETOpc(SetCCI.getOpcode()); + X86::CondCode Cond = X86::getCondFromSETCC(SetCCI); // Note that we can't usefully rewrite this to the inverse without complex // analysis of the users of the setCC. Largely we rely on duplicates which // could have been avoided already being avoided here. Index: lib/Target/X86/X86ISelDAGToDAG.cpp =================================================================== --- lib/Target/X86/X86ISelDAGToDAG.cpp +++ lib/Target/X86/X86ISelDAGToDAG.cpp @@ -2331,11 +2331,14 @@ X86::CondCode CC = X86::COND_INVALID; if (CC == X86::COND_INVALID) CC = X86::getCondFromBranchOpc(N->getMachineOpcode()); - if (CC == X86::COND_INVALID) - CC = X86::getCondFromSETOpc(N->getMachineOpcode()); if (CC == X86::COND_INVALID) { unsigned Opc = N->getMachineOpcode(); - if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr || Opc == X86::CMOV64rr) + if (Opc == X86::SETCCr) + CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0)); + else if (Opc == X86::SETCCm) + CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5)); + else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr || + Opc == X86::CMOV64rr) CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2)); else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm || Opc == X86::CMOV64rm) Index: lib/Target/X86/X86InstrCMovSetCC.td =================================================================== --- lib/Target/X86/X86InstrCMovSetCC.td +++ lib/Target/X86/X86InstrCMovSetCC.td @@ -55,69 +55,52 @@ } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" } // isCodeGenOnly = 1, ForceDisassemble = 1 -multiclass CMOV_Aliases<string Asm, int CC> { - def : InstAlias<Asm # "{w}\t{$src, $dst|$dst, $src}", +// SetCC instructions. +let Uses = [EFLAGS] in { + def SETCCr : I<0x90, MRMXrCC, (outs GR8:$dst), (ins ccode:$cond), + "set${cond}\t$dst", + [(set GR8:$dst, (X86setcc imm:$cond, EFLAGS))]>, + TB, Sched<[WriteSETCC]>; + def SETCCm : I<0x90, MRMXmCC, (outs), (ins i8mem:$dst, ccode:$cond), + "set${cond}\t$dst", + [(store (X86setcc imm:$cond, EFLAGS), addr:$dst)]>, + TB, Sched<[WriteSETCCStore]>; +} // Uses = [EFLAGS] + +multiclass CMOV_SETCC_Aliases<string Cond, int CC> { + def : InstAlias<"cmov"#Cond#"{w}\t{$src, $dst|$dst, $src}", (CMOV16rr GR16:$dst, GR16:$src, CC), 0>; - def : InstAlias<Asm # "{l}\t{$src, $dst|$dst, $src}", (CMOV32rr GR32:$dst, GR32:$src, CC), 0>; - def : InstAlias<Asm # "{q}\t{$src, $dst|$dst, $src}", (CMOV64rr GR64:$dst, GR64:$src, CC), 0>; - def : InstAlias<Asm # "{w}\t{$src, $dst|$dst, $src}", (CMOV16rm GR16:$dst, i16mem:$src, CC), 0>; - def : InstAlias<Asm # "{l}\t{$src, $dst|$dst, $src}", (CMOV32rm GR32:$dst, i32mem:$src, CC), 0>; - def : InstAlias<Asm # "{q}\t{$src, $dst|$dst, $src}", (CMOV64rm GR64:$dst, i64mem:$src, CC), 0>; -} -defm : CMOV_Aliases<"cmovo" , 0>; -defm : CMOV_Aliases<"cmovno", 1>; -defm : CMOV_Aliases<"cmovb" , 2>; -defm : CMOV_Aliases<"cmovae", 3>; -defm : CMOV_Aliases<"cmove" , 4>; -defm : CMOV_Aliases<"cmovne", 5>; -defm : CMOV_Aliases<"cmovbe", 6>; -defm : CMOV_Aliases<"cmova" , 7>; -defm : CMOV_Aliases<"cmovs" , 8>; -defm : CMOV_Aliases<"cmovns", 9>; -defm : CMOV_Aliases<"cmovp" , 10>; -defm : CMOV_Aliases<"cmovnp", 11>; -defm : CMOV_Aliases<"cmovl" , 12>; -defm : CMOV_Aliases<"cmovge", 13>; -defm : CMOV_Aliases<"cmovle", 14>; -defm : CMOV_Aliases<"cmovg" , 15>; - - -// SetCC instructions.
-multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> { - let Uses = [EFLAGS] in { - def r : I<opc, MRMXr, (outs GR8:$dst), (ins), - !strconcat(Mnemonic, "\t$dst"), - [(set GR8:$dst, (X86setcc OpNode, EFLAGS))]>, - TB, Sched<[WriteSETCC]>; - def m : I<opc, MRMXm, (outs), (ins i8mem:$dst), - !strconcat(Mnemonic, "\t$dst"), - [(store (X86setcc OpNode, EFLAGS), addr:$dst)]>, - TB, Sched<[WriteSETCCStore]>; - } // Uses = [EFLAGS] + def : InstAlias<"set"#Cond#"\t$dst", (SETCCr GR8:$dst, CC), 0>; + def : InstAlias<"set"#Cond#"\t$dst", (SETCCm i8mem:$dst, CC), 0>; } -defm SETO : SETCC<0x90, "seto", X86_COND_O>; // is overflow bit set -defm SETNO : SETCC<0x91, "setno", X86_COND_NO>; // is overflow bit not set -defm SETB : SETCC<0x92, "setb", X86_COND_B>; // unsigned less than -defm SETAE : SETCC<0x93, "setae", X86_COND_AE>; // unsigned greater or equal -defm SETE : SETCC<0x94, "sete", X86_COND_E>; // equal to -defm SETNE : SETCC<0x95, "setne", X86_COND_NE>; // not equal to -defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>; // unsigned less than or equal -defm SETA : SETCC<0x97, "seta", X86_COND_A>; // unsigned greater than -defm SETS : SETCC<0x98, "sets", X86_COND_S>; // is signed bit set -defm SETNS : SETCC<0x99, "setns", X86_COND_NS>; // is not signed -defm SETP : SETCC<0x9A, "setp", X86_COND_P>; // is parity bit set -defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>; // is parity bit not set -defm SETL : SETCC<0x9C, "setl", X86_COND_L>; // signed less than -defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>; // signed greater or equal -defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>; // signed less than or equal -defm SETG : SETCC<0x9F, "setg", X86_COND_G>; // signed greater than +defm : CMOV_SETCC_Aliases<"o" , 0>; +defm : CMOV_SETCC_Aliases<"no", 1>; +defm : CMOV_SETCC_Aliases<"b" , 2>; +defm : CMOV_SETCC_Aliases<"ae", 3>; +defm : CMOV_SETCC_Aliases<"e" , 4>; +defm : CMOV_SETCC_Aliases<"ne", 5>; +defm : CMOV_SETCC_Aliases<"be", 6>; +defm : CMOV_SETCC_Aliases<"a" , 7>; +defm : CMOV_SETCC_Aliases<"s" , 8>; +defm : CMOV_SETCC_Aliases<"ns", 9>; +defm : CMOV_SETCC_Aliases<"p" , 10>; +defm : CMOV_SETCC_Aliases<"np", 11>; +defm : CMOV_SETCC_Aliases<"l" , 12>; +defm : CMOV_SETCC_Aliases<"ge", 13>; +defm : CMOV_SETCC_Aliases<"le", 14>; +defm : CMOV_SETCC_Aliases<"g" , 15>; // SALC is an undocumented instruction. Information for this instruction can be found // here http://www.rcollins.org/secrets/opcodes/SALC.html Index: lib/Target/X86/X86InstrCompiler.td =================================================================== --- lib/Target/X86/X86InstrCompiler.td +++ lib/Target/X86/X86InstrCompiler.td @@ -354,7 +354,7 @@ // this happens, it is great. However, if we are left with an 8-bit sbb and an // and, we might as well just match it as a setb. def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), - (SETBr)>; + (SETCCr (i8 2))>; // Patterns to give priority when both inputs are zero so that we don't use // an immediate for the RHS.
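[Editor's note] The two .td hunks above are the heart of the patch: the sixteen per-condition SETcc* opcodes collapse into a single SETCCr/SETCCm pair that carries the condition as a trailing ccode immediate, and the new MRMXrCC/MRMXmCC forms tell the encoder to fold that immediate into the opcode byte. A minimal standalone sketch of the encoding rule, for illustration only (the 0x90 base and the 0-15 condition range come from the defs above; the helper name is invented here, this is not the emitter's code):

    #include <cassert>
    #include <cstdint>

    // x86 SETcc encodes as 0F (90 + CC): COND_O (0) -> 0F 90 (seto),
    // COND_E (4) -> 0F 94 (sete), COND_G (15) -> 0F 9F (setg).
    uint8_t setccOpcodeByte(unsigned CC) {
      assert(CC <= 15 && "x86 condition codes are 0-15");
      return static_cast<uint8_t>(0x90 + CC);
    }

This is why the MRMXrCC/MRMXmCC cases added to X86MCCodeEmitter.cpp emit BaseOpcode + CC followed by a ModRM byte whose reg field is hard-coded to 0, and why later hunks can retarget a setcc by rewriting one immediate operand instead of swapping instruction descriptions.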
Index: lib/Target/X86/X86InstrFoldTables.cpp =================================================================== --- lib/Target/X86/X86InstrFoldTables.cpp +++ lib/Target/X86/X86InstrFoldTables.cpp @@ -322,22 +322,7 @@ { X86::PUSH16r, X86::PUSH16rmm, TB_FOLDED_LOAD }, { X86::PUSH32r, X86::PUSH32rmm, TB_FOLDED_LOAD }, { X86::PUSH64r, X86::PUSH64rmm, TB_FOLDED_LOAD }, - { X86::SETAEr, X86::SETAEm, TB_FOLDED_STORE }, - { X86::SETAr, X86::SETAm, TB_FOLDED_STORE }, - { X86::SETBEr, X86::SETBEm, TB_FOLDED_STORE }, - { X86::SETBr, X86::SETBm, TB_FOLDED_STORE }, - { X86::SETEr, X86::SETEm, TB_FOLDED_STORE }, - { X86::SETGEr, X86::SETGEm, TB_FOLDED_STORE }, - { X86::SETGr, X86::SETGm, TB_FOLDED_STORE }, - { X86::SETLEr, X86::SETLEm, TB_FOLDED_STORE }, - { X86::SETLr, X86::SETLm, TB_FOLDED_STORE }, - { X86::SETNEr, X86::SETNEm, TB_FOLDED_STORE }, - { X86::SETNOr, X86::SETNOm, TB_FOLDED_STORE }, - { X86::SETNPr, X86::SETNPm, TB_FOLDED_STORE }, - { X86::SETNSr, X86::SETNSm, TB_FOLDED_STORE }, - { X86::SETOr, X86::SETOm, TB_FOLDED_STORE }, - { X86::SETPr, X86::SETPm, TB_FOLDED_STORE }, - { X86::SETSr, X86::SETSm, TB_FOLDED_STORE }, + { X86::SETCCr, X86::SETCCm, TB_FOLDED_STORE }, { X86::TAILJMPr, X86::TAILJMPm, TB_FOLDED_LOAD }, { X86::TAILJMPr64, X86::TAILJMPm64, TB_FOLDED_LOAD }, { X86::TAILJMPr64_REX, X86::TAILJMPm64_REX, TB_FOLDED_LOAD }, Index: lib/Target/X86/X86InstrFormats.td =================================================================== --- lib/Target/X86/X86InstrFormats.td +++ lib/Target/X86/X86InstrFormats.td @@ -31,6 +31,7 @@ def MRMSrcMem4VOp3 : Format<34>; def MRMSrcMemOp4 : Format<35>; def MRMSrcMemCC : Format<36>; +def MRMXmCC: Format<38>; def MRMXm : Format<39>; def MRM0m : Format<40>; def MRM1m : Format<41>; def MRM2m : Format<42>; def MRM3m : Format<43>; def MRM4m : Format<44>; def MRM5m : Format<45>; @@ -40,6 +41,7 @@ def MRMSrcReg4VOp3 : Format<50>; def MRMSrcRegOp4 : Format<51>; def MRMSrcRegCC : Format<52>; +def MRMXrCC: Format<54>; def MRMXr : Format<55>; def MRM0r : Format<56>; def MRM1r : Format<57>; def MRM2r : Format<58>; def MRM3r : Format<59>; def MRM4r : Format<60>; def MRM5r : Format<61>; Index: lib/Target/X86/X86InstrInfo.h =================================================================== --- lib/Target/X86/X86InstrInfo.h +++ lib/Target/X86/X86InstrInfo.h @@ -42,9 +42,8 @@ /// the instruction operands should be swapped to match the condition code. std::pair<CondCode, bool> getX86ConditionCode(CmpInst::Predicate Predicate); -/// Return a set opcode for the given condition and whether it has -/// a memory operand. -unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false); +/// Return a setcc opcode based on whether it has a memory operand. +unsigned getSETOpc(bool HasMemoryOperand = false); /// Return a cmov opcode for the given register size in bytes, and operand type. unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand = false); @@ -53,7 +52,7 @@ CondCode getCondFromBranchOpc(unsigned Opc); // Turn setCC opcode into condition code. -CondCode getCondFromSETOpc(unsigned Opc); +CondCode getCondFromSETCC(const MachineInstr &MI); // Turn CMov opcode into condition code. CondCode getCondFromCMov(const MachineInstr &MI); Index: lib/Target/X86/X86InstrInfo.cpp =================================================================== --- lib/Target/X86/X86InstrInfo.cpp +++ lib/Target/X86/X86InstrInfo.cpp @@ -2001,26 +2001,13 @@ } } -/// Return condition code of a SET opcode.
-X86::CondCode X86::getCondFromSETOpc(unsigned Opc) { - switch (Opc) { +/// Return condition code of a SETCC opcode. +X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) { + switch (MI.getOpcode()) { default: return X86::COND_INVALID; - case X86::SETAr: case X86::SETAm: return X86::COND_A; - case X86::SETAEr: case X86::SETAEm: return X86::COND_AE; - case X86::SETBr: case X86::SETBm: return X86::COND_B; - case X86::SETBEr: case X86::SETBEm: return X86::COND_BE; - case X86::SETEr: case X86::SETEm: return X86::COND_E; - case X86::SETGr: case X86::SETGm: return X86::COND_G; - case X86::SETGEr: case X86::SETGEm: return X86::COND_GE; - case X86::SETLr: case X86::SETLm: return X86::COND_L; - case X86::SETLEr: case X86::SETLEm: return X86::COND_LE; - case X86::SETNEr: case X86::SETNEm: return X86::COND_NE; - case X86::SETNOr: case X86::SETNOm: return X86::COND_NO; - case X86::SETNPr: case X86::SETNPm: return X86::COND_NP; - case X86::SETNSr: case X86::SETNSm: return X86::COND_NS; - case X86::SETOr: case X86::SETOm: return X86::COND_O; - case X86::SETPr: case X86::SETPm: return X86::COND_P; - case X86::SETSr: case X86::SETSm: return X86::COND_S; + case X86::SETCCr: case X86::SETCCm: + return static_cast<X86::CondCode>( + MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); } } @@ -2139,30 +2126,9 @@ return std::make_pair(CC, NeedSwap); } -/// Return a set opcode for the given condition and -/// whether it has memory operand. -unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) { - static const uint16_t Opc[16][2] = { - { X86::SETOr, X86::SETOm }, - { X86::SETNOr, X86::SETNOm }, - { X86::SETBr, X86::SETBm }, - { X86::SETAEr, X86::SETAEm }, - { X86::SETEr, X86::SETEm }, - { X86::SETNEr, X86::SETNEm }, - { X86::SETBEr, X86::SETBEm }, - { X86::SETAr, X86::SETAm }, - { X86::SETSr, X86::SETSm }, - { X86::SETNSr, X86::SETNSm }, - { X86::SETPr, X86::SETPm }, - { X86::SETNPr, X86::SETNPm }, - { X86::SETLr, X86::SETLm }, - { X86::SETGEr, X86::SETGEm }, - { X86::SETLEr, X86::SETLEm }, - { X86::SETGr, X86::SETGm }, - }; - - assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes"); - return Opc[CC][HasMemoryOperand ? 1 : 0]; +/// Return a setcc opcode based on whether it has a memory operand. +unsigned X86::getSETOpc(bool HasMemoryOperand) { + return HasMemoryOperand ? X86::SETCCm : X86::SETCCr; } /// Return a cmov opcode for the given register size in bytes, and operand type. @@ -3555,7 +3521,7 @@ // If we are done with the basic block, we need to check whether EFLAGS is // live-out. bool IsSafe = false; - SmallVector<std::pair<MachineInstr *, unsigned>, 4> OpsToUpdate; + SmallVector<std::pair<MachineInstr *, X86::CondCode>, 4> OpsToUpdate; MachineBasicBlock::iterator E = CmpInstr.getParent()->end(); for (++I; I != E; ++I) { const MachineInstr &Instr = *I; @@ -3572,16 +3538,13 @@ // EFLAGS is used by this instruction. X86::CondCode OldCC = X86::COND_INVALID; - bool OpcIsSET = false; if (IsCmpZero || IsSwapped) { // We decode the condition code from opcode. if (Instr.isBranch()) OldCC = X86::getCondFromBranchOpc(Instr.getOpcode()); else { - OldCC = X86::getCondFromSETOpc(Instr.getOpcode()); - if (OldCC != X86::COND_INVALID) - OpcIsSET = true; - else + OldCC = X86::getCondFromSETCC(Instr); + if (OldCC == X86::COND_INVALID) OldCC = X86::getCondFromCMov(Instr); } if (OldCC == X86::COND_INVALID) return false; @@ -3627,21 +3590,10 @@ } if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) { - // Synthesize the new opcode.
- bool HasMemoryOperand = Instr.hasOneMemOperand(); - unsigned NewOpc; - if (Instr.isBranch()) - NewOpc = GetCondBranchFromCond(ReplacementCC); - else if(OpcIsSET) - NewOpc = getSETFromCond(ReplacementCC, HasMemoryOperand); - else { - NewOpc = ReplacementCC; - } - // Push the MachineInstr to OpsToUpdate. // If it is safe to remove CmpInstr, the condition code of these // instructions will be modified. - OpsToUpdate.push_back(std::make_pair(&*I, NewOpc)); + OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC)); } if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { // It is safe to remove CmpInstr if EFLAGS is updated again or killed. @@ -3696,11 +3648,11 @@ // Modify the condition code of instructions in OpsToUpdate. for (auto &Op : OpsToUpdate) { - if (X86::getCondFromCMov(*Op.first) != X86::COND_INVALID) + if (Op.first->isBranch()) + Op.first->setDesc(get(GetCondBranchFromCond(Op.second))); + else Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1) .setImm(Op.second); - else - Op.first->setDesc(get(Op.second)); } return true; } Index: lib/Target/X86/X86InstructionSelector.cpp =================================================================== --- lib/Target/X86/X86InstructionSelector.cpp +++ lib/Target/X86/X86InstructionSelector.cpp @@ -947,7 +947,6 @@ bool SwapArgs; std::tie(CC, SwapArgs) = X86::getX86ConditionCode( (CmpInst::Predicate)I.getOperand(1).getPredicate()); - unsigned OpSet = X86::getSETFromCond(CC); unsigned LHS = I.getOperand(2).getReg(); unsigned RHS = I.getOperand(3).getReg(); @@ -981,7 +980,7 @@ .addReg(RHS); MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(), - TII.get(OpSet), I.getOperand(0).getReg()); + TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC); constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI); constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI); @@ -1002,8 +1001,8 @@ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction. 
static const uint16_t SETFOpcTable[2][3] = { - {X86::SETEr, X86::SETNPr, X86::AND8rr}, - {X86::SETNEr, X86::SETPr, X86::OR8rr}}; + {X86::COND_E, X86::COND_NP, X86::AND8rr}, + {X86::COND_NE, X86::COND_P, X86::OR8rr}}; const uint16_t *SETFOpc = nullptr; switch (Predicate) { default: @@ -1043,9 +1042,9 @@ unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass); unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass); MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(), - TII.get(SETFOpc[0]), FlagReg1); + TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]); MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(), - TII.get(SETFOpc[1]), FlagReg2); + TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]); MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SETFOpc[2]), ResultReg) .addReg(FlagReg1) @@ -1063,7 +1062,6 @@ bool SwapArgs; std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate); assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); - unsigned Opc = X86::getSETFromCond(CC); if (SwapArgs) std::swap(LhsReg, RhsReg); @@ -1075,7 +1073,7 @@ .addReg(RhsReg); MachineInstr &Set = - *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc), ResultReg); + *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC); constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI); constrainSelectedInstRegOperands(Set, TII, TRI, RBI); I.eraseFromParent(); Index: lib/Target/X86/X86SchedBroadwell.td =================================================================== --- lib/Target/X86/X86SchedBroadwell.td +++ lib/Target/X86/X86SchedBroadwell.td @@ -736,7 +736,6 @@ ADC16i16, SBB16i16, ADC32i32, SBB32i32, ADC64i32, SBB64i32)>; -def: InstRW<[BWWriteResGroup20], (instregex "SET(A|BE)r")>; def BWWriteResGroup22 : SchedWriteRes<[BWPort4,BWPort6,BWPort237]> { let Latency = 2; @@ -815,7 +814,6 @@ let ResourceCycles = [1,1,1,1]; } def: InstRW<[BWWriteResGroup38], (instrs CALL64pcrel32)>; -def: InstRW<[BWWriteResGroup38], (instregex "SET(A|BE)m")>; def BWWriteResGroup39 : SchedWriteRes<[BWPort0,BWPort1]> { let Latency = 4; @@ -1627,4 +1625,30 @@ def : InstRW<[BWCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>; def : InstRW<[BWCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>; +// SETCCs that use both Z and C flag require an extra uop. 
+def BWWriteSETA_SETBEr : SchedWriteRes<[BWPort06,BWPort0156]> { + let Latency = 2; + let ResourceCycles = [1,1]; + let NumMicroOps = 2; +} + +def BWWriteSETA_SETBEm : SchedWriteRes<[BWPort4,BWPort237,BWPort06,BWPort0156]> { + let Latency = 3; + let ResourceCycles = [1,1,1,1]; + let NumMicroOps = 4; +} + +def BWSETA_SETBErr : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [BWWriteSETA_SETBEr]>, + SchedVar<NoSchedPred, [WriteSETCC]> +]>; + +def BWSETA_SETBErm : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [BWWriteSETA_SETBEm]>, + SchedVar<NoSchedPred, [WriteSETCCStore]> +]>; + +def : InstRW<[BWSETA_SETBErr], (instrs SETCCr)>; +def : InstRW<[BWSETA_SETBErm], (instrs SETCCm)>; + } // SchedModel Index: lib/Target/X86/X86SchedHaswell.td =================================================================== --- lib/Target/X86/X86SchedHaswell.td +++ lib/Target/X86/X86SchedHaswell.td @@ -1126,7 +1126,6 @@ let ResourceCycles = [1,1]; } def: InstRW<[HWWriteResGroup35], (instrs CWD, JCXZ, JECXZ, JRCXZ)>; -def: InstRW<[HWWriteResGroup35], (instregex "SET(A|BE)r")>; def HWWriteResGroup36_2 : SchedWriteRes<[HWPort5,HWPort23]> { let Latency = 7; @@ -1172,7 +1171,6 @@ let ResourceCycles = [1,1,1,1]; } def: InstRW<[HWWriteResGroup45], (instrs CALL64pcrel32)>; -def: InstRW<[HWWriteResGroup45], (instregex "SET(A|BE)m")>; def HWWriteResGroup46 : SchedWriteRes<[HWPort4,HWPort23,HWPort237,HWPort06]> { let Latency = 8; @@ -1911,4 +1909,30 @@ def : InstRW<[HWCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>; def : InstRW<[HWCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>; +// SETCCs that use both Z and C flag require an extra uop. +def HWWriteSETA_SETBEr : SchedWriteRes<[HWPort06,HWPort0156]> { + let Latency = 2; + let ResourceCycles = [1,1]; + let NumMicroOps = 2; +} + +def HWWriteSETA_SETBEm : SchedWriteRes<[HWPort4,HWPort237,HWPort06,HWPort0156]> { + let Latency = 3; + let ResourceCycles = [1,1,1,1]; + let NumMicroOps = 4; +} + +def HWSETA_SETBErr : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [HWWriteSETA_SETBEr]>, + SchedVar<NoSchedPred, [WriteSETCC]> +]>; + +def HWSETA_SETBErm : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [HWWriteSETA_SETBEm]>, + SchedVar<NoSchedPred, [WriteSETCCStore]> +]>; + +def : InstRW<[HWSETA_SETBErr], (instrs SETCCr)>; +def : InstRW<[HWSETA_SETBErm], (instrs SETCCm)>; + } // SchedModel Index: lib/Target/X86/X86SchedPredicates.td =================================================================== --- lib/Target/X86/X86SchedPredicates.td +++ lib/Target/X86/X86SchedPredicates.td @@ -72,3 +72,15 @@ CheckImmOperand_s<7, "X86::COND_A">, CheckImmOperand_s<7, "X86::COND_BE"> ]>; + +// A predicate to check for COND_A and COND_BE SETCCs which have an extra uop +// on recent Intel CPUs.
+def IsSETAr_Or_SETBEr : CheckAny<[ + CheckImmOperand_s<1, "X86::COND_A">, + CheckImmOperand_s<1, "X86::COND_BE"> +]>; + +def IsSETAm_Or_SETBEm : CheckAny<[ + CheckImmOperand_s<5, "X86::COND_A">, + CheckImmOperand_s<5, "X86::COND_BE"> +]>; Index: lib/Target/X86/X86SchedSandyBridge.td =================================================================== --- lib/Target/X86/X86SchedSandyBridge.td +++ lib/Target/X86/X86SchedSandyBridge.td @@ -615,13 +615,6 @@ MMX_PSIGNDrr, MMX_PSIGNWrr)>; -def SBWriteResGroup9 : SchedWriteRes<[SBPort05]> { - let Latency = 2; - let NumMicroOps = 2; - let ResourceCycles = [2]; -} -def: InstRW<[SBWriteResGroup9], (instregex "SET(A|BE)r")>; - def SBWriteResGroup11 : SchedWriteRes<[SBPort015]> { let Latency = 2; let NumMicroOps = 2; @@ -772,13 +765,6 @@ } def: InstRW<[SBWriteResGroup41], (instrs FNINIT)>; -def SBWriteResGroup43 : SchedWriteRes<[SBPort4,SBPort23,SBPort05]> { - let Latency = 3; - let NumMicroOps = 4; - let ResourceCycles = [1,1,2]; -} -def: InstRW<[SBWriteResGroup43], (instregex "SET(A|BE)m")>; - def SBWriteResGroup45 : SchedWriteRes<[SBPort0,SBPort4,SBPort23,SBPort15]> { let Latency = 5; let NumMicroOps = 4; @@ -1198,4 +1184,30 @@ def : InstRW<[SBCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>; def : InstRW<[SBCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>; +// SETCCs that use both Z and C flag require an extra uop. +def SBWriteSETA_SETBEr : SchedWriteRes<[SBPort05]> { + let Latency = 2; + let ResourceCycles = [2]; + let NumMicroOps = 2; +} + +def SBWriteSETA_SETBEm : SchedWriteRes<[SBPort4,SBPort23,SBPort05]> { + let Latency = 3; + let ResourceCycles = [1,1,2]; + let NumMicroOps = 4; +} + +def SBSETA_SETBErr : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [SBWriteSETA_SETBEr]>, + SchedVar<NoSchedPred, [WriteSETCC]> +]>; + +def SBSETA_SETBErm : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [SBWriteSETA_SETBEm]>, + SchedVar<NoSchedPred, [WriteSETCCStore]> +]>; + +def : InstRW<[SBSETA_SETBErr], (instrs SETCCr)>; +def : InstRW<[SBSETA_SETBErm], (instrs SETCCm)>; + } // SchedModel Index: lib/Target/X86/X86SchedSkylakeClient.td =================================================================== --- lib/Target/X86/X86SchedSkylakeClient.td +++ lib/Target/X86/X86SchedSkylakeClient.td @@ -698,13 +698,6 @@ def: InstRW<[SKLWriteResGroup14], (instrs FDECSTP, MMX_MOVDQ2Qrr)>; -def SKLWriteResGroup15 : SchedWriteRes<[SKLPort06]> { - let Latency = 2; - let NumMicroOps = 2; - let ResourceCycles = [2]; -} -def: InstRW<[SKLWriteResGroup15], (instregex "SET(A|BE)r")>; - def SKLWriteResGroup17 : SchedWriteRes<[SKLPort0156]> { let Latency = 2; let NumMicroOps = 2; @@ -840,13 +833,6 @@ } def: InstRW<[SKLWriteResGroup43], (instrs FNSTSWm)>; -def SKLWriteResGroup44 : SchedWriteRes<[SKLPort4,SKLPort237,SKLPort06]> { - let Latency = 3; - let NumMicroOps = 4; - let ResourceCycles = [1,1,2]; -} -def: InstRW<[SKLWriteResGroup44], (instregex "SET(A|BE)m")>; - def SKLWriteResGroup45 : SchedWriteRes<[SKLPort4,SKLPort6,SKLPort237,SKLPort0156]> { let Latency = 3; let NumMicroOps = 4; @@ -1782,4 +1768,30 @@ def : InstRW<[SKLCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>; def : InstRW<[SKLCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>; +// SETCCs that use both Z and C flag require an extra uop.
+def SKLWriteSETA_SETBEr : SchedWriteRes<[SKLPort06]> { + let Latency = 2; + let ResourceCycles = [2]; + let NumMicroOps = 2; +} + +def SKLWriteSETA_SETBEm : SchedWriteRes<[SKLPort4,SKLPort237,SKLPort06]> { + let Latency = 3; + let ResourceCycles = [1,1,2]; + let NumMicroOps = 4; +} + +def SKLSETA_SETBErr : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [SKLWriteSETA_SETBEr]>, + SchedVar<NoSchedPred, [WriteSETCC]> +]>; + +def SKLSETA_SETBErm : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [SKLWriteSETA_SETBEm]>, + SchedVar<NoSchedPred, [WriteSETCCStore]> +]>; + +def : InstRW<[SKLSETA_SETBErr], (instrs SETCCr)>; +def : InstRW<[SKLSETA_SETBErm], (instrs SETCCm)>; + } // SchedModel Index: lib/Target/X86/X86SchedSkylakeServer.td =================================================================== --- lib/Target/X86/X86SchedSkylakeServer.td +++ lib/Target/X86/X86SchedSkylakeServer.td @@ -722,13 +722,6 @@ def: InstRW<[SKXWriteResGroup14], (instrs FDECSTP, MMX_MOVDQ2Qrr)>; -def SKXWriteResGroup15 : SchedWriteRes<[SKXPort06]> { - let Latency = 2; - let NumMicroOps = 2; - let ResourceCycles = [2]; -} -def: InstRW<[SKXWriteResGroup15], (instregex "SET(A|BE)r")>; - def SKXWriteResGroup17 : SchedWriteRes<[SKXPort0156]> { let Latency = 2; let NumMicroOps = 2; @@ -901,13 +894,6 @@ } def: InstRW<[SKXWriteResGroup45], (instrs FNSTSWm)>; -def SKXWriteResGroup46 : SchedWriteRes<[SKXPort4,SKXPort237,SKXPort06]> { - let Latency = 3; - let NumMicroOps = 4; - let ResourceCycles = [1,1,2]; -} -def: InstRW<[SKXWriteResGroup46], (instregex "SET(A|BE)m")>; - def SKXWriteResGroup47 : SchedWriteRes<[SKXPort4,SKXPort6,SKXPort237,SKXPort0156]> { let Latency = 3; let NumMicroOps = 4; @@ -2498,4 +2484,30 @@ def : InstRW<[SKXCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>; def : InstRW<[SKXCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>; +// SETCCs that use both Z and C flag require an extra uop.
+def SKXWriteSETA_SETBEr : SchedWriteRes<[SKXPort06]> { + let Latency = 2; + let ResourceCycles = [2]; + let NumMicroOps = 2; +} + +def SKXWriteSETA_SETBEm : SchedWriteRes<[SKXPort4,SKXPort237,SKXPort06]> { + let Latency = 3; + let ResourceCycles = [1,1,2]; + let NumMicroOps = 4; +} + +def SKXSETA_SETBErr : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [SKXWriteSETA_SETBEr]>, + SchedVar<NoSchedPred, [WriteSETCC]> +]>; + +def SKXSETA_SETBErm : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [SKXWriteSETA_SETBEm]>, + SchedVar<NoSchedPred, [WriteSETCCStore]> +]>; + +def : InstRW<[SKXSETA_SETBErr], (instrs SETCCr)>; +def : InstRW<[SKXSETA_SETBErm], (instrs SETCCm)>; + } // SchedModel Index: lib/Target/X86/X86ScheduleBdVer2.td =================================================================== --- lib/Target/X86/X86ScheduleBdVer2.td +++ lib/Target/X86/X86ScheduleBdVer2.td @@ -472,8 +472,15 @@ let ResourceCycles = [2]; let NumMicroOps = 2; } -def : InstRW<[PdWriteSETGEmSETGmSETLEmSETLm], (instrs SETGEm, SETGm, - SETLEm, SETLm)>; + +def PdSETGEmSETGmSETLEmSETLm : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_GE">>, [PdWriteSETGEmSETGmSETLEmSETLm]>, + SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_G">>, [PdWriteSETGEmSETGmSETLEmSETLm]>, + SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_LE">>, [PdWriteSETGEmSETGmSETLEmSETLm]>, + SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_L">>, [PdWriteSETGEmSETGmSETLEmSETLm]>, + SchedVar<NoSchedPred, [WriteSETCCStore]> +]>; +def : InstRW<[PdSETGEmSETGmSETLEmSETLm], (instrs SETCCm)>; defm : PdWriteRes; Index: test/CodeGen/X86/GlobalISel/select-cmp.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-cmp.mir +++ test/CodeGen/X86/GlobalISel/select-cmp.mir @@ -99,8 +99,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $dil ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY $sil ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -130,8 +130,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY $di ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY $si ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -161,8 +161,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -192,8 +192,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8
= SETEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -223,8 +223,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETNEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -254,8 +254,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -285,8 +285,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -316,8 +316,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -347,8 +347,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -378,8 +378,8 
@@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -409,8 +409,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 13, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -440,8 +440,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 12, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax @@ -471,8 +471,8 @@ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit $eflags - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLEr]], %subreg.sub_8bit + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 14, implicit $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags ; CHECK: $eax = COPY [[AND32ri8_]] ; CHECK: RET 0, implicit $eax Index: test/CodeGen/X86/GlobalISel/select-phi.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-phi.mir +++ test/CodeGen/X86/GlobalISel/select-phi.mir @@ -127,8 +127,8 @@ ; ALL: [[COPY4:%[0-9]+]]:gr8 = COPY [[COPY3]].sub_8bit ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags - ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags + ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags + ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags ; ALL: JNE_1 %bb.2, implicit $eflags ; ALL: bb.1.cond.false: ; ALL: successors: %bb.2(0x80000000) @@ -186,8 +186,8 @@ ; ALL: [[COPY4:%[0-9]+]]:gr16 = COPY [[COPY3]].sub_16bit ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags - ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags + ; ALL: [[SETCCr:%[0-9]+]]:gr8 = 
SETCCr 15, implicit $eflags + ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags ; ALL: JNE_1 %bb.2, implicit $eflags ; ALL: bb.1.cond.false: ; ALL: successors: %bb.2(0x80000000) @@ -241,8 +241,8 @@ ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY $edx ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags - ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags + ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags + ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags ; ALL: JNE_1 %bb.1, implicit $eflags ; ALL: JMP_1 %bb.2 ; ALL: bb.1.cond.true: @@ -304,8 +304,8 @@ ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdx ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags - ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags + ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags + ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags ; ALL: JNE_1 %bb.1, implicit $eflags ; ALL: JMP_1 %bb.2 ; ALL: bb.1.cond.true: @@ -376,8 +376,8 @@ ; ALL: [[COPY4:%[0-9]+]]:fr32 = COPY [[COPY3]] ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags - ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags + ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags + ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags ; ALL: JNE_1 %bb.2, implicit $eflags ; ALL: bb.1.cond.false: ; ALL: successors: %bb.2(0x80000000) @@ -437,8 +437,8 @@ ; ALL: [[COPY4:%[0-9]+]]:fr64 = COPY [[COPY3]] ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags - ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags - ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags + ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags + ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags ; ALL: JNE_1 %bb.2, implicit $eflags ; ALL: bb.1.cond.false: ; ALL: successors: %bb.2(0x80000000) Index: test/CodeGen/X86/GlobalISel/x86_64-select-fcmp.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x86_64-select-fcmp.mir +++ test/CodeGen/X86/GlobalISel/x86_64-select-fcmp.mir @@ -169,9 +169,9 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags - ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags - ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETEr]], [[SETNPr]], implicit-def $eflags + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags ; CHECK: $al = COPY [[AND8rr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 @@ -209,8 +209,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags - ; CHECK: $al = COPY [[SETAr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ 
-247,8 +247,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags - ; CHECK: $al = COPY [[SETAEr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -285,8 +285,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags - ; CHECK: $al = COPY [[SETAr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -323,8 +323,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags - ; CHECK: $al = COPY [[SETAEr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -361,8 +361,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags - ; CHECK: $al = COPY [[SETNEr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -399,8 +399,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags - ; CHECK: $al = COPY [[SETNPr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -437,8 +437,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags - ; CHECK: $al = COPY [[SETPr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -475,8 +475,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags - ; CHECK: $al = COPY [[SETEr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, implicit $al %2:vecr(s128) = COPY $xmm0 %0:vecr(s32) = G_TRUNC %2(s128) @@ -513,8 +513,8 @@ ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]] ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags - ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags - ; CHECK: $al = COPY [[SETBr]] + ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags + ; CHECK: $al = COPY [[SETCCr]] ; CHECK: RET 0, 
implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s32) = G_TRUNC %2(s128)
@@ -551,8 +551,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
 ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags
- ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
- ; CHECK: $al = COPY [[SETBEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s32) = G_TRUNC %2(s128)
@@ -589,8 +589,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
- ; CHECK: $al = COPY [[SETBr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s32) = G_TRUNC %2(s128)
@@ -627,8 +627,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
- ; CHECK: $al = COPY [[SETBEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s32) = G_TRUNC %2(s128)
@@ -665,9 +665,9 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
- ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
- ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETNEr]], [[SETPr]], implicit-def $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
+ ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
+ ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
 ; CHECK: $al = COPY [[OR8rr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
@@ -705,9 +705,9 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
- ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags
- ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETEr]], [[SETNPr]], implicit-def $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags
+ ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
 ; CHECK: $al = COPY [[AND8rr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
@@ -745,8 +745,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
- ; CHECK: $al = COPY [[SETAr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -783,8 +783,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
- ; CHECK: $al = COPY [[SETAEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -821,8 +821,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
- ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
- ; CHECK: $al = COPY [[SETAr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -859,8 +859,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
- ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
- ; CHECK: $al = COPY [[SETAEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -897,8 +897,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
- ; CHECK: $al = COPY [[SETNEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -935,8 +935,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags
- ; CHECK: $al = COPY [[SETNPr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -973,8 +973,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
- ; CHECK: $al = COPY [[SETPr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -1011,8 +1011,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
- ; CHECK: $al = COPY [[SETEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -1049,8 +1049,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
- ; CHECK: $al = COPY [[SETBr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -1087,8 +1087,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
- ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
- ; CHECK: $al = COPY [[SETBEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -1125,8 +1125,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
- ; CHECK: $al = COPY [[SETBr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -1163,8 +1163,8 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
- ; CHECK: $al = COPY [[SETBEr]]
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
 %0:vecr(s64) = G_TRUNC %2(s128)
@@ -1201,9 +1201,9 @@
 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
- ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
- ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
- ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETNEr]], [[SETPr]], implicit-def $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
+ ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
+ ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
 ; CHECK: $al = COPY [[OR8rr]]
 ; CHECK: RET 0, implicit $al
 %2:vecr(s128) = COPY $xmm0
Index: test/CodeGen/X86/flags-copy-lowering.mir
===================================================================
--- test/CodeGen/X86/flags-copy-lowering.mir
+++ test/CodeGen/X86/flags-copy-lowering.mir
@@ -119,8 +119,8 @@
 CMP64rr %0, %1, implicit-def $eflags
 %2:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -175,8 +175,8 @@
 CMP64rr %0, %1, implicit-def $eflags
 %2:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -230,10 +230,10 @@
 CMP64rr %0, %1, implicit-def $eflags
 %2:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
- ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
- ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETCCr 5, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -241,10 +241,10 @@
 ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
 $eflags = COPY %2
- %3:gr8 = SETAr implicit $eflags
- %4:gr8 = SETBr implicit $eflags
- %5:gr8 = SETEr implicit $eflags
- SETNEm $rsp, 1, $noreg, -16, $noreg, implicit killed $eflags
+ %3:gr8 = SETCCr 7, implicit $eflags
+ %4:gr8 = SETCCr 2, implicit $eflags
+ %5:gr8 = SETCCr 4, implicit $eflags
+ SETCCm $rsp, 1, $noreg, -16, $noreg, 5, implicit killed $eflags
 MOV8mr $rsp, 1, $noreg, -16, $noreg, killed %3
 MOV8mr $rsp, 1, $noreg, -16, $noreg, killed %4
 MOV8mr $rsp, 1, $noreg, -16, $noreg, killed %5
@@ -273,9 +273,9 @@
 CMP64rr %0, %1, implicit-def $eflags
 %2:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
- ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -319,7 +319,7 @@
 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -353,7 +353,7 @@
 %2:gr64 = SUB64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY killed $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -387,8 +387,8 @@
 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
- ; CHECK-NEXT: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -426,8 +426,8 @@
 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
- ; CHECK-NEXT: %[[OF_REG:[^:]*]]:gr8 = SETOr implicit $eflags
+ ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: %[[OF_REG:[^:]*]]:gr8 = SETCCr 0, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -465,7 +465,7 @@
 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -499,7 +499,7 @@
 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -533,7 +533,7 @@
 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
 %3:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -599,10 +599,10 @@
 CMP64rr %0, %1, implicit-def $eflags
 %2:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETSr implicit $eflags
- ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit $eflags
- ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETCCr 8, implicit $eflags
+ ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETCCr 5, implicit $eflags
+ ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -672,12 +672,12 @@
 CMP64rr %0, %1, implicit-def $eflags
 %2:gr64 = COPY $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETSr implicit $eflags
- ; CHECK-NEXT: %[[P_REG:[^:]*]]:gr8 = SETPr implicit $eflags
- ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit $eflags
- ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
- ; CHECK-NEXT: %[[O_REG:[^:]*]]:gr8 = SETOr implicit $eflags
+ ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETCCr 8, implicit $eflags
+ ; CHECK-NEXT: %[[P_REG:[^:]*]]:gr8 = SETCCr 10, implicit $eflags
+ ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETCCr 5, implicit $eflags
+ ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
+ ; CHECK-NEXT: %[[O_REG:[^:]*]]:gr8 = SETCCr 0, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -824,9 +824,9 @@
 JMP_1 %bb.4
 ; CHECK: bb.1:
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
- ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
- ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 bb.2:
@@ -962,12 +962,12 @@
 %0:gr64 = COPY $rdi
 %1:gr64 = COPY $rsi
 CMP64rr %0, %1, implicit-def $eflags
- %2:gr8 = SETAr implicit $eflags
- %3:gr8 = SETAEr implicit $eflags
+ %2:gr8 = SETCCr 7, implicit $eflags
+ %3:gr8 = SETCCr 3, implicit $eflags
 %4:gr64 = COPY $eflags
 ; CHECK: CMP64rr %0, %1, implicit-def $eflags
- ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
- ; CHECK-NEXT: %[[AE_REG:[^:]*]]:gr8 = SETAEr implicit $eflags
+ ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
+ ; CHECK-NEXT: %[[AE_REG:[^:]*]]:gr8 = SETCCr 3, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -1020,15 +1020,15 @@
 %0:gr64 = COPY $rdi
 %1:gr64 = COPY $rsi
 CMP64rr %0, %1, implicit-def $eflags
- SETEm %0, 1, $noreg, -16, $noreg, implicit $eflags
+ SETCCm %0, 1, $noreg, -16, $noreg, 4, implicit $eflags
 %2:gr64 = COPY $eflags
 ; CHECK: CMP64rr %0, %1, implicit-def $eflags
 ; We cannot reuse this SETE because it stores the flag directly to memory,
 ; so we have two SETEs here. FIXME: It'd be great if something could fold
 ; these automatically. If not, maybe we want to unfold SETcc instructions
 ; writing to memory so we can reuse them.
- ; CHECK-NEXT: SETEm {{.*}} implicit $eflags
- ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
+ ; CHECK-NEXT: SETCCm {{.*}} 4, implicit $eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
 ; CHECK-NOT: COPY{{( killed)?}} $eflags
 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
Index: test/CodeGen/X86/pr27681.mir
===================================================================
--- test/CodeGen/X86/pr27681.mir
+++ test/CodeGen/X86/pr27681.mir
@@ -45,7 +45,7 @@
 $ebp = SHR32rCL killed $ebp, implicit-def dead $eflags, implicit $cl
 $ebp = XOR32rr killed $ebp, killed $ebx, implicit-def dead $eflags
 TEST32rr $edx, $edx, implicit-def $eflags
- $cl = SETNEr implicit $eflags
+ $cl = SETCCr 5, implicit $eflags
 ; This %bl def is antidependent on the above use of $ebx
 $bl = MOV8rm $esp, 1, $noreg, 3, _ ; :: (load 1 from %stack.0)
 $cl = OR8rr killed $cl, $bl, implicit-def dead $eflags
@@ -54,7 +54,7 @@
 $ecx = MOV32rm $esp, 1, $noreg, 24, _ ; :: (load 4 from %stack.2)
 $edx = SAR32rCL killed $edx, implicit-def dead $eflags, implicit $cl
 TEST32rr killed $edx, $edx, implicit-def $eflags
- $cl = SETNEr implicit $eflags
+ $cl = SETCCr 5, implicit $eflags
 ; Verify that removal of the $bl antidependence does not use $ch
 ; as a replacement register.
 ; CHECK: $cl = AND8rr killed $cl, killed $b
@@ -67,7 +67,7 @@
 liveins: $cl, $eax, $ebp, $esi
 OR32mr $esp, 1, $noreg, 8, $noreg, killed $eax, implicit-def $eflags ; :: (store 4 into %stack.1)
- $dl = SETNEr implicit $eflags, implicit-def $edx
+ $dl = SETCCr 5, implicit $eflags, implicit-def $edx
 bb.3:
 liveins: $cl, $ebp, $edx, $esi
Index: test/CodeGen/X86/stack-folding-adx.mir
===================================================================
--- test/CodeGen/X86/stack-folding-adx.mir
+++ test/CodeGen/X86/stack-folding-adx.mir
@@ -93,10 +93,10 @@
 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, -1, implicit-def $eflags
 ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.2, 1, $noreg, 0, $noreg :: (load 4 from %stack.2)
 ; CHECK: [[ADCX32rm:%[0-9]+]]:gr32 = ADCX32rm [[ADCX32rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 4 from %stack.1)
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit killed $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit killed $eflags
 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK: MOV32mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADCX32rm]] :: (store 4 into %ir.4, align 1)
- ; CHECK: $al = COPY [[SETBr]]
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, $al
 %3:gr64 = COPY $rcx
 %2:gr32 = COPY $edx
@@ -105,7 +105,7 @@
 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, -1, implicit-def $eflags
 %7:gr32 = ADCX32rr %7, %2, implicit-def $eflags, implicit killed $eflags
- %8:gr8 = SETBr implicit killed $eflags
+ %8:gr8 = SETCCr 2, implicit killed $eflags
 MOV32mr %3, 1, $noreg, 0, $noreg, %7 :: (store 4 into %ir.4, align 1)
 $al = COPY %8
 RET 0, killed $al
@@ -145,10 +145,10 @@
 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, -1, implicit-def $eflags
 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.2, 1, $noreg, 0, $noreg :: (load 8 from %stack.2)
 ; CHECK: [[ADCX64rm:%[0-9]+]]:gr64 = ADCX64rm [[ADCX64rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 8 from %stack.1)
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit killed $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit killed $eflags
 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK: MOV64mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADCX64rm]] :: (store 8 into %ir.4, align 1)
- ; CHECK: $al = COPY [[SETBr]]
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, $al
 %3:gr64 = COPY $rcx
 %2:gr64 = COPY $rdx
@@ -157,7 +157,7 @@
 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, -1, implicit-def $eflags
 %7:gr64 = ADCX64rr %7, %2, implicit-def $eflags, implicit killed $eflags
- %8:gr8 = SETBr implicit killed $eflags
+ %8:gr8 = SETCCr 2, implicit killed $eflags
 MOV64mr %3, 1, $noreg, 0, $noreg, %7 :: (store 8 into %ir.4, align 1)
 $al = COPY %8
 RET 0, killed $al
@@ -197,10 +197,10 @@
 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, 127, implicit-def $eflags
 ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.2, 1, $noreg, 0, $noreg :: (load 4 from %stack.2)
 ; CHECK: [[ADOX32rm:%[0-9]+]]:gr32 = ADOX32rm [[ADOX32rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 4 from %stack.1)
- ; CHECK: [[SETOr:%[0-9]+]]:gr8 = SETOr implicit killed $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 0, implicit killed $eflags
 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK: MOV32mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADOX32rm]] :: (store 4 into %ir.4, align 1)
- ; CHECK: $al = COPY [[SETOr]]
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, $al
 %3:gr64 = COPY $rcx
 %2:gr32 = COPY $edx
@@ -209,7 +209,7 @@
 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, 127, implicit-def $eflags
 %7:gr32 = ADOX32rr %7, %2, implicit-def $eflags, implicit killed $eflags
- %8:gr8 = SETOr implicit killed $eflags
+ %8:gr8 = SETCCr 0, implicit killed $eflags
 MOV32mr %3, 1, $noreg, 0, $noreg, %7 :: (store 4 into %ir.4, align 1)
 $al = COPY %8
 RET 0, killed $al
@@ -249,10 +249,10 @@
 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, 127, implicit-def $eflags
 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.2, 1, $noreg, 0, $noreg :: (load 8 from %stack.2)
 ; CHECK: [[ADOX64rm:%[0-9]+]]:gr64 = ADOX64rm [[ADOX64rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 8 from %stack.1)
- ; CHECK: [[SETOr:%[0-9]+]]:gr8 = SETOr implicit killed $eflags
+ ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 0, implicit killed $eflags
 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK: MOV64mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADOX64rm]] :: (store 8 into %ir.4, align 1)
- ; CHECK: $al = COPY [[SETOr]]
+ ; CHECK: $al = COPY [[SETCCr]]
 ; CHECK: RET 0, $al
 %3:gr64 = COPY $rcx
 %2:gr64 = COPY $rdx
@@ -261,7 +261,7 @@
 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, 127, implicit-def $eflags
 %7:gr64 = ADOX64rr %7, %2, implicit-def $eflags, implicit killed $eflags
- %8:gr8 = SETOr implicit killed $eflags
+ %8:gr8 = SETCCr 0, implicit killed $eflags
 MOV64mr %3, 1, $noreg, 0, $noreg, %7 :: (store 8 into %ir.4, align 1)
 $al = COPY %8
 RET 0, killed $al
Index: tools/llvm-exegesis/lib/X86/Target.cpp
===================================================================
--- tools/llvm-exegesis/lib/X86/Target.cpp
+++ tools/llvm-exegesis/lib/X86/Target.cpp
@@ -33,6 +33,7 @@
 case X86II::MRMSrcReg4VOp3:
 case X86II::MRMSrcRegOp4:
 case X86II::MRMSrcRegCC:
+ case X86II::MRMXrCC:
 case X86II::MRMXr:
 case X86II::MRM0r:
 case X86II::MRM1r:
@@ -120,6 +121,7 @@
 case X86II::MRMSrcMem4VOp3:
 case X86II::MRMSrcMemOp4:
 case X86II::MRMSrcMemCC:
+ case X86II::MRMXmCC:
 case X86II::MRMXm:
 case X86II::MRM0m:
 case X86II::MRM1m:
Index: utils/TableGen/X86RecognizableInstr.h
===================================================================
--- utils/TableGen/X86RecognizableInstr.h
+++ utils/TableGen/X86RecognizableInstr.h
@@ -106,7 +106,7 @@
 MRMSrcMem4VOp3 = 34,
 MRMSrcMemOp4 = 35,
 MRMSrcMemCC = 36,
- MRMXm = 39,
+ MRMXmCC = 38, MRMXm = 39,
 MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
 MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
 MRMDestReg = 48,
@@ -114,7 +114,7 @@
 MRMSrcReg4VOp3 = 50,
 MRMSrcRegOp4 = 51,
 MRMSrcRegCC = 52,
- MRMXr = 55,
+ MRMXrCC = 54, MRMXr = 55,
 MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
 MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,
 #define MAP(from, to) MRM_##from = to,
Index: utils/TableGen/X86RecognizableInstr.cpp
===================================================================
--- utils/TableGen/X86RecognizableInstr.cpp
+++ utils/TableGen/X86RecognizableInstr.cpp
@@ -634,6 +634,12 @@
 HANDLE_OPERAND(memory)
 HANDLE_OPERAND(opcodeModifier)
 break;
+ case X86Local::MRMXrCC:
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for MRMXrCC");
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
 case X86Local::MRMXr:
 case X86Local::MRM0r:
 case X86Local::MRM1r:
@@ -659,6 +665,12 @@
 HANDLE_OPTIONAL(relocation)
 HANDLE_OPTIONAL(immediate)
 break;
+ case X86Local::MRMXmCC:
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for MRMXmCC");
+ HANDLE_OPERAND(memory)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
 case X86Local::MRMXm:
 case X86Local::MRM0m:
 case X86Local::MRM1m:
@@ -744,6 +756,7 @@
 case X86Local::MRMSrcReg4VOp3:
 case X86Local::MRMSrcRegOp4:
 case X86Local::MRMSrcRegCC:
+ case X86Local::MRMXrCC:
 case X86Local::MRMXr:
 filter = llvm::make_unique<ModFilter>(true);
 break;
@@ -752,6 +765,7 @@
 case X86Local::MRMSrcMem4VOp3:
 case X86Local::MRMSrcMemOp4:
 case X86Local::MRMSrcMemCC:
+ case X86Local::MRMXmCC:
 case X86Local::MRMXm:
 filter = llvm::make_unique<ModFilter>(false);
 break;
@@ -785,7 +799,8 @@
 assert(filter && "Filter not set");
 if (Form == X86Local::AddRegFrm || Form == X86Local::MRMSrcRegCC ||
-     Form == X86Local::MRMSrcMemCC) {
+     Form == X86Local::MRMSrcMemCC || Form == X86Local::MRMXrCC ||
+     Form == X86Local::MRMXmCC) {
 unsigned Count = Form == X86Local::AddRegFrm ? 8 : 16;
 assert(((opcodeToSet % Count) == 0) && "ADDREG_FRM opcode not aligned");
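A note on the condition-code immediates, for readers of the test updates above (this note and the sketch below are editorial commentary, not part of the patch): the immediate that now trails SETCCr/SETCCm is the numeric condition code, and it doubles as the low nibble of the corresponding SETcc opcode. That is also why the TableGen hunk above asserts that the base opcode of a condition-code form is aligned (Count is 16 for everything except AddRegFrm): a 16-aligned base lets the encoder synthesize all sixteen per-condition opcodes by simple addition. The standalone C++ sketch below reconstructs the mapping purely from the substitutions visible in these tests plus the standard x86 SETcc encoding (0x0F 0x90 through 0x0F 0x9F); names such as CondCodeInfo are illustrative and do not exist in LLVM.

// Illustrative only; the table is reconstructed from the test updates
// above (SETAr -> SETCCr 7, SETBr -> SETCCr 2, and so on).
#include <cstdio>

namespace {
// Condition-code immediate as it appears in the updated MIR tests,
// paired with the pre-patch per-condition mnemonic it replaced.
struct CondCodeInfo {
  unsigned CC;
  const char *OldMnemonic;
};

const CondCodeInfo CondCodes[] = {
    {0, "SETOr"},  {2, "SETBr"},  {3, "SETAEr"}, {4, "SETEr"},
    {5, "SETNEr"}, {6, "SETBEr"}, {7, "SETAr"},  {8, "SETSr"},
    {10, "SETPr"}, {11, "SETNPr"},
};
} // namespace

int main() {
  // SETcc occupies opcodes 0x0F 0x90..0x0F 0x9F, so the final opcode is
  // the 16-aligned base plus the condition code; the "% 16" assert in the
  // TableGen change above is what keeps that addition safe.
  for (const CondCodeInfo &Info : CondCodes)
    std::printf("SETCCr %2u  <-  %-7s (0x0F 0x%02X)\n", Info.CC,
                Info.OldMnemonic, 0x90 + Info.CC);
  return 0;
}

The same low-nibble arithmetic explains every substitution in the tests: for example, the FCMP_UNE lowering that previously emitted SETNEr and SETPr now emits SETCCr 5 and SETCCr 10, the codes for the SETNE (0x0F 0x95) and SETP (0x0F 0x9A) encodings.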