Index: llvm/lib/Target/X86/X86InstrControl.td
===================================================================
--- llvm/lib/Target/X86/X86InstrControl.td
+++ llvm/lib/Target/X86/X86InstrControl.td
@@ -145,6 +145,17 @@
                  [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>,
                  Sched<[WriteJumpLd]>;
 
+  // Win64 wants indirect jumps leaving the function to have a REX_W prefix.
+  // These are switched from TAILJMPr/m64_REX in MCInstLower.
+  let isCodeGenOnly = 1, hasREX_WPrefix = 1 in {
+    def JMP64r_REX : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
+                       "rex64 jmp{q}\t{*}$dst", []>, Sched<[WriteJump]>;
+    let mayLoad = 1 in
+    def JMP64m_REX : I<0xFF, MRM4m, (outs), (ins i64mem:$dst),
+                       "rex64 jmp{q}\t{*}$dst", []>, Sched<[WriteJumpLd]>;
+
+  }
+
   // Non-tracking jumps for IBT, use with caution.
   let isCodeGenOnly = 1 in {
     def JMP16r_NT : I<0xFF, MRM4r, (outs), (ins GR16 : $dst), "jmp{w}\t{*}$dst",
@@ -282,16 +293,14 @@
   def TCRETURNmi : PseudoI<(outs), (ins i32mem_TC:$dst, i32imm:$offset),
                            []>, Sched<[WriteJumpLd]>;
 
-  // FIXME: The should be pseudo instructions that are lowered when going to
-  // mcinst.
-  def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs), (ins i32imm_pcrel:$dst),
-                           "jmp\t$dst", []>, Sched<[WriteJump]>;
+  def TAILJMPd : PseudoI<(outs), (ins i32imm_pcrel:$dst),
+                         []>, Sched<[WriteJump]>;
 
-  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
-                   "", []>, Sched<[WriteJump]>;  // FIXME: Remove encoding when JIT is dead.
+  def TAILJMPr : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
+                         []>, Sched<[WriteJump]>;
   let mayLoad = 1 in
-  def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst),
-                   "jmp{l}\t{*}$dst", []>, Sched<[WriteJumpLd]>;
+  def TAILJMPm : PseudoI<(outs), (ins i32mem_TC:$dst),
+                         []>, Sched<[WriteJumpLd]>;
 }
 
 // Conditional tail calls are similar to the above, but they are branches
@@ -303,8 +312,7 @@
                               (ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
 
   // This gets substituted to a conditional jump instruction in MC lowering.
-  def TAILJMPd_CC : Ii32PCRel<0x80, RawFrm, (outs),
-                              (ins i32imm_pcrel:$dst, i32imm:$cond), "", []>;
+  def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$cond), []>;
 }
 
 
@@ -359,24 +367,24 @@
                                (ins i64mem_TC:$dst, i32imm:$offset),
                                []>, Sched<[WriteJumpLd]>, NotMemoryFoldable;
 
-  def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs), (ins i64i32imm_pcrel:$dst),
-                             "jmp\t$dst", []>, Sched<[WriteJump]>;
+  def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_pcrel:$dst),
+                           []>, Sched<[WriteJump]>;
 
-  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
-                     "jmp{q}\t{*}$dst", []>, Sched<[WriteJump]>;
+  def TAILJMPr64 : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
+                           []>, Sched<[WriteJump]>;
 
   let mayLoad = 1 in
-  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst),
-                     "jmp{q}\t{*}$dst", []>, Sched<[WriteJumpLd]>;
+  def TAILJMPm64 : PseudoI<(outs), (ins i64mem_TC:$dst),
+                           []>, Sched<[WriteJumpLd]>;
 
   // Win64 wants indirect jumps leaving the function to have a REX_W prefix.
   let hasREX_WPrefix = 1 in {
-    def TAILJMPr64_REX : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
-                           "rex64 jmp{q}\t{*}$dst", []>, Sched<[WriteJump]>;
+    def TAILJMPr64_REX : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
+                                 []>, Sched<[WriteJump]>;
     let mayLoad = 1 in
-    def TAILJMPm64_REX : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst),
-                           "rex64 jmp{q}\t{*}$dst", []>, Sched<[WriteJumpLd]>;
+    def TAILJMPm64_REX : PseudoI<(outs), (ins i64mem_TC:$dst),
+                                 []>, Sched<[WriteJumpLd]>;
   }
 }
 
@@ -411,6 +419,6 @@
                                       i32imm:$cond), []>;
 
   // This gets substituted to a conditional jump instruction in MC lowering.
-  def TAILJMPd64_CC : Ii32PCRel<0x80, RawFrm, (outs),
-                                (ins i64i32imm_pcrel:$dst, i32imm:$cond), "", []>;
+  def TAILJMPd64_CC : PseudoI<(outs),
+                              (ins i64i32imm_pcrel:$dst, i32imm:$cond), []>;
 }
Index: llvm/lib/Target/X86/X86MCInstLower.cpp
===================================================================
--- llvm/lib/Target/X86/X86MCInstLower.cpp
+++ llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -427,6 +427,41 @@
   }
 }
 
+// Replace TAILJMP opcodes with their equivalent opcodes that have encoding
+// information.
+static unsigned convertTailJumpOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  case X86::TAILJMPr:
+    Opcode = X86::JMP32r;
+    break;
+  case X86::TAILJMPm:
+    Opcode = X86::JMP32m;
+    break;
+  case X86::TAILJMPr64:
+    Opcode = X86::JMP64r;
+    break;
+  case X86::TAILJMPm64:
+    Opcode = X86::JMP64m;
+    break;
+  case X86::TAILJMPr64_REX:
+    Opcode = X86::JMP64r_REX;
+    break;
+  case X86::TAILJMPm64_REX:
+    Opcode = X86::JMP64m_REX;
+    break;
+  case X86::TAILJMPd:
+  case X86::TAILJMPd64:
+    Opcode = X86::JMP_1;
+    break;
+  case X86::TAILJMPd_CC:
+  case X86::TAILJMPd64_CC:
+    Opcode = X86::JCC_1;
+    break;
+  }
+
+  return Opcode;
+}
+
 void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
   OutMI.setOpcode(MI->getOpcode());
 
@@ -500,12 +535,10 @@
     break;
   }
 
-  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions used to have
+  // CALL64r, CALL64pcrel32 - These instructions used to have
   // register inputs modeled as normal uses instead of implicit uses. As such,
   // they we used to truncate off all but the first operand (the callee). This
   // issue seems to have been fixed at some point. This assert verifies that.
-  case X86::TAILJMPr64:
-  case X86::TAILJMPr64_REX:
   case X86::CALL64r:
   case X86::CALL64pcrel32:
     assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
@@ -535,31 +568,33 @@
     break;
   }
 
-  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
-  // instruction.
-  {
-    unsigned Opcode;
-  case X86::TAILJMPr:
-    Opcode = X86::JMP32r;
-    goto SetTailJmpOpcode;
-  case X86::TAILJMPd:
-  case X86::TAILJMPd64:
-    Opcode = X86::JMP_1;
-    goto SetTailJmpOpcode;
-
-  SetTailJmpOpcode:
-    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
-    OutMI.setOpcode(Opcode);
-    break;
-  }
+  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
+  // instruction.
+  case X86::TAILJMPr:
+  case X86::TAILJMPr64:
+  case X86::TAILJMPr64_REX:
+  case X86::TAILJMPd:
+  case X86::TAILJMPd64: {
+    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
+    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
+    break;
+  }
 
   case X86::TAILJMPd_CC:
   case X86::TAILJMPd64_CC: {
     assert(OutMI.getNumOperands() == 2 && "Unexpected number of operands!");
-    OutMI.setOpcode(X86::JCC_1);
+    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
     break;
   }
 
+  case X86::TAILJMPm:
+  case X86::TAILJMPm64:
+  case X86::TAILJMPm64_REX:
+    assert(OutMI.getNumOperands() == X86::AddrNumOperands &&
+           "Unexpected number of operands!");
+    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
+    break;
+
   case X86::DEC16r:
   case X86::DEC32r:
   case X86::INC16r:
@@ -1359,6 +1394,7 @@
   recordSled(CurSled, MI, SledKind::TAIL_CALL);
 
   unsigned OpCode = MI.getOperand(0).getImm();
+  OpCode = convertTailJumpOpcode(OpCode);
   MCInst TC;
   TC.setOpcode(OpCode);
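The net effect of the patch is that every TAILJMP* pseudo is mapped to an encodable JMP/Jcc opcode in exactly one place, convertTailJumpOpcode, instead of ad-hoc switches scattered through X86MCInstLower. A minimal standalone sketch of that mapping idea follows; the Opcode enum, the convertTailJump name, and main() are hypothetical stand-ins for illustration only, not LLVM's X86:: opcode enum and not part of this patch.

// Illustration only: a self-contained mock of the pseudo-to-real opcode
// mapping. The enum values below are hypothetical stand-ins, not the real
// X86:: opcodes.
#include <cassert>
#include <cstdio>

enum Opcode {
  TAILJMPr, TAILJMPm, TAILJMPr64, TAILJMPm64, TAILJMPr64_REX, TAILJMPm64_REX,
  TAILJMPd, TAILJMPd64, TAILJMPd_CC, TAILJMPd64_CC,
  JMP32r, JMP32m, JMP64r, JMP64m, JMP64r_REX, JMP64m_REX, JMP_1, JCC_1
};

// Mirrors the shape of convertTailJumpOpcode: each tail-jump pseudo maps to
// the jump opcode it is actually printed/encoded as; anything else passes
// through unchanged.
static Opcode convertTailJump(Opcode Op) {
  switch (Op) {
  case TAILJMPr:       return JMP32r;
  case TAILJMPm:       return JMP32m;
  case TAILJMPr64:     return JMP64r;
  case TAILJMPm64:     return JMP64m;
  case TAILJMPr64_REX: return JMP64r_REX;
  case TAILJMPm64_REX: return JMP64m_REX;
  case TAILJMPd:
  case TAILJMPd64:     return JMP_1;   // direct tail jumps become a plain jmp
  case TAILJMPd_CC:
  case TAILJMPd64_CC:  return JCC_1;   // conditional tail jumps become a jcc
  default:             return Op;      // non-tail-jump opcodes are untouched
  }
}

int main() {
  assert(convertTailJump(TAILJMPr64_REX) == JMP64r_REX);
  assert(convertTailJump(TAILJMPd64_CC) == JCC_1);
  assert(convertTailJump(JMP_1) == JMP_1);
  std::printf("tail-jump pseudo mapping: ok\n");
  return 0;
}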