diff --git a/clang/test/CodeGen/ms-inline-asm-64.c b/clang/test/CodeGen/ms-inline-asm-64.c --- a/clang/test/CodeGen/ms-inline-asm-64.c +++ b/clang/test/CodeGen/ms-inline-asm-64.c @@ -58,3 +58,17 @@ // CHECK-SAME: mov [ebx + $$4], ecx // CHECK-SAME: "*m,~{eax},~{ebx},~{dirflag},~{fpsr},~{flags}"(%struct.t3_type* %{{.*}}) } + +void bar() {} + +void t5() { + __asm { + call bar + jmp bar + } + // CHECK: t5 + // CHECK: call void asm sideeffect inteldialect + // CHECK-SAME: call qword ptr ${0:P} + // CHECK-SAME: jmp qword ptr ${1:P} + // CHECK-SAME: "*m,*m,~{dirflag},~{fpsr},~{flags}"(void (...)* bitcast (void ()* @bar to void (...)*), void (...)* bitcast (void ()* @bar to void (...)*)) +} diff --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h --- a/llvm/include/llvm/MC/MCInstrDesc.h +++ b/llvm/include/llvm/MC/MCInstrDesc.h @@ -37,7 +37,12 @@ /// These are flags set on operands, but should be considered /// private, all access should go through the MCOperandInfo accessors. /// See the accessors for a description of what these are. -enum OperandFlags { LookupPtrRegClass = 0, Predicate, OptionalDef }; +enum OperandFlags { + LookupPtrRegClass = 0, + Predicate, + OptionalDef, + BranchTarget +}; /// Operands are tagged with one of the values of this enum. enum OperandType { @@ -98,6 +103,9 @@ /// Set if this operand is a optional def. bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); } + /// Set if this operand is a branch target. + bool isBranchTarget() const { return Flags & (1 << MCOI::BranchTarget); } + bool isGenericType() const { return OperandType >= MCOI::OPERAND_FIRST_GENERIC && OperandType <= MCOI::OPERAND_LAST_GENERIC; diff --git a/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h --- a/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h +++ b/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h @@ -71,10 +71,6 @@ /// variable/label? 
Only valid when parsing MS-style inline assembly. virtual bool needAddressOf() const { return false; } - /// isCallOperand - Is this an operand of an inline-assembly call instruction? - /// Only valid when parsing MS-style inline assembly. - virtual bool isCallOperand() const { return false; } - /// isOffsetOfLocal - Do we need to emit code to get the offset of the local /// variable, rather than its value? Only valid when parsing MS-style inline /// assembly. diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -5845,7 +5845,7 @@ InputDecls.push_back(OpDecl); InputDeclsAddressOf.push_back(Operand.needAddressOf()); InputConstraints.push_back(Constraint.str()); - if (Operand.isCallOperand()) + if (Desc.OpInfo[i - 1].isBranchTarget()) AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size()); else AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size()); diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp --- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -2924,15 +2924,6 @@ } } - // Mark the operands of a call instruction. These need to be handled - // differently when referenced in MS-style inline assembly. 
- if (Name.startswith("call") || Name.startswith("lcall")) { - for (size_t i = 1; i < Operands.size(); ++i) { - X86Operand &Op = static_cast<X86Operand &>(*Operands[i]); - Op.setCallOperand(true); - } - } - if (Flags) Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc)); return false; diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h --- a/llvm/lib/Target/X86/AsmParser/X86Operand.h +++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h @@ -284,9 +284,6 @@ bool needAddressOf() const override { return AddressOf; } - bool isCallOperand() const override { return CallOperand; } - void setCallOperand(bool IsCallOperand) { CallOperand = IsCallOperand; } - bool isMem() const override { return Kind == Memory; } bool isMemUnsized() const { return Kind == Memory && Mem.Size == 0; diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td --- a/llvm/lib/Target/X86/X86InstrControl.td +++ b/llvm/lib/Target/X86/X86InstrControl.td @@ -220,12 +220,12 @@ // registers are added manually. let Uses = [ESP, SSP] in { def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm, - (outs), (ins i32imm_pcrel:$dst), + (outs), (ins i32imm_brtarget:$dst), "call{l}\t$dst", []>, OpSize32, Requires<[Not64BitMode]>, Sched<[WriteJump]>; let hasSideEffects = 0 in def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm, - (outs), (ins i16imm_pcrel:$dst), + (outs), (ins i16imm_brtarget:$dst), "call{w}\t$dst", []>, OpSize16, Sched<[WriteJump]>; def CALL16r : I<0xFF, MRM2r, (outs), (ins GR16:$dst), @@ -285,7 +285,7 @@ // Tail call stuff. 
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, isCodeGenOnly = 1, Uses = [ESP, SSP] in { - def TCRETURNdi : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$offset), + def TCRETURNdi : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$offset), []>, Sched<[WriteJump]>, NotMemoryFoldable; def TCRETURNri : PseudoI<(outs), (ins ptr_rc_tailcall:$dst, i32imm:$offset), []>, Sched<[WriteJump]>, NotMemoryFoldable; @@ -293,7 +293,7 @@ def TCRETURNmi : PseudoI<(outs), (ins i32mem_TC:$dst, i32imm:$offset), []>, Sched<[WriteJumpLd]>; - def TAILJMPd : PseudoI<(outs), (ins i32imm_pcrel:$dst), + def TAILJMPd : PseudoI<(outs), (ins i32imm_brtarget:$dst), []>, Sched<[WriteJump]>; def TAILJMPr : PseudoI<(outs), (ins ptr_rc_tailcall:$dst), @@ -309,10 +309,11 @@ isCodeGenOnly = 1, SchedRW = [WriteJump] in let Uses = [ESP, EFLAGS, SSP] in { def TCRETURNdicc : PseudoI<(outs), - (ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>; + (ins i32imm_brtarget:$dst, i32imm:$offset, i32imm:$cond), + []>; // This gets substituted to a conditional jump instruction in MC lowering. - def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$cond), []>; + def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$cond), []>; } @@ -328,7 +329,7 @@ // that the offset between an arbitrary immediate and the call will fit in // the 32-bit pcrel field that we have. 
def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm, - (outs), (ins i64i32imm_pcrel:$dst), + (outs), (ins i64i32imm_brtarget:$dst), "call{q}\t$dst", []>, OpSize32, Requires<[In64BitMode]>; def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst), @@ -357,7 +358,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, isCodeGenOnly = 1, Uses = [RSP, SSP] in { def TCRETURNdi64 : PseudoI<(outs), - (ins i64i32imm_pcrel:$dst, i32imm:$offset), + (ins i64i32imm_brtarget:$dst, i32imm:$offset), []>, Sched<[WriteJump]>; def TCRETURNri64 : PseudoI<(outs), (ins ptr_rc_tailcall:$dst, i32imm:$offset), @@ -367,7 +368,7 @@ (ins i64mem_TC:$dst, i32imm:$offset), []>, Sched<[WriteJumpLd]>, NotMemoryFoldable; - def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_pcrel:$dst), + def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_brtarget:$dst), []>, Sched<[WriteJump]>; def TAILJMPr64 : PseudoI<(outs), (ins ptr_rc_tailcall:$dst), @@ -415,10 +416,10 @@ isCodeGenOnly = 1, SchedRW = [WriteJump] in let Uses = [RSP, EFLAGS, SSP] in { def TCRETURNdi64cc : PseudoI<(outs), - (ins i64i32imm_pcrel:$dst, i32imm:$offset, + (ins i64i32imm_brtarget:$dst, i32imm:$offset, i32imm:$cond), []>; // This gets substituted to a conditional jump instruction in MC lowering. def TAILJMPd64_CC : PseudoI<(outs), - (ins i64i32imm_pcrel:$dst, i32imm:$cond), []>; + (ins i64i32imm_brtarget:$dst, i32imm:$cond), []>; } diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td --- a/llvm/lib/Target/X86/X86InstrInfo.td +++ b/llvm/lib/Target/X86/X86InstrInfo.td @@ -454,18 +454,6 @@ let OperandType = "OPERAND_MEMORY"; } -let OperandType = "OPERAND_PCREL", - ParserMatchClass = X86AbsMemAsmOperand, - PrintMethod = "printPCRelImm" in { -def i32imm_pcrel : Operand<i32>; -def i16imm_pcrel : Operand<i16>; - -// Branch targets have OtherVT type and print as pc-relative values. -def brtarget : Operand<OtherVT>; -def brtarget8 : Operand<OtherVT>; - -} - // Special parser to detect 16-bit mode to select 16-bit displacement. 
def X86AbsMem16AsmOperand : AsmOperandClass { let Name = "AbsMem16"; @@ -473,14 +461,26 @@ let SuperClasses = [X86AbsMemAsmOperand]; } -// Branch targets have OtherVT type and print as pc-relative values. -let OperandType = "OPERAND_PCREL", - PrintMethod = "printPCRelImm" in { -let ParserMatchClass = X86AbsMem16AsmOperand in - def brtarget16 : Operand<OtherVT>; -let ParserMatchClass = X86AbsMemAsmOperand in - def brtarget32 : Operand<OtherVT>; +// Branch targets print as pc-relative values. +class BranchTargetOperand<ValueType ty> : Operand<ty> { + let OperandType = "OPERAND_PCREL"; + let PrintMethod = "printPCRelImm"; + let ParserMatchClass = X86AbsMemAsmOperand; +} + +def i32imm_brtarget : BranchTargetOperand<i32>; +def i16imm_brtarget : BranchTargetOperand<i16>; + +// 64-bits but only 32 bits are significant, and those bits are treated as being +// pc relative. +def i64i32imm_brtarget : BranchTargetOperand<i64>; + +def brtarget : BranchTargetOperand<OtherVT>; +def brtarget8 : BranchTargetOperand<OtherVT>; +def brtarget16 : BranchTargetOperand<OtherVT> { + let ParserMatchClass = X86AbsMem16AsmOperand; } +def brtarget32 : BranchTargetOperand<OtherVT>; let RenderMethod = "addSrcIdxOperands" in { def X86SrcIdx8Operand : AsmOperandClass { @@ -756,14 +756,6 @@ let OperandType = "OPERAND_IMMEDIATE"; } -// 64-bits but only 32 bits are significant, and those bits are treated as being -// pc relative. -def i64i32imm_pcrel : Operand<i64> { - let PrintMethod = "printPCRelImm"; - let ParserMatchClass = X86AbsMemAsmOperand; - let OperandType = "OPERAND_PCREL"; -} - def lea64_32mem : Operand<i32> { let PrintMethod = "printanymem"; let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG); diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp --- a/llvm/utils/TableGen/InstrInfoEmitter.cpp +++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp @@ -164,6 +164,11 @@ if (Op.Rec->isSubClassOf("OptionalDefOperand")) Res += "|(1<<MCOI::OptionalDef)"; + + // Branch target operands.  Check to see if the original unexpanded + // operand was of type BranchTargetOperand. + if (Op.Rec->isSubClassOf("BranchTargetOperand")) + Res += "|(1<<MCOI::BranchTarget)";