diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -4095,12 +4095,12 @@
   // be found here:
   // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
   switch (Inst.getOpcode()) {
-  case X86::RETW:
-  case X86::RETL:
-  case X86::RETQ:
-  case X86::RETIL:
-  case X86::RETIQ:
-  case X86::RETIW: {
+  case X86::RET16:
+  case X86::RET32:
+  case X86::RET64:
+  case X86::RETI16:
+  case X86::RETI32:
+  case X86::RETI64: {
     MCInst ShlInst, FenceInst;
     bool Parse32 = is32BitMode() || Code16GCC;
     unsigned Basereg =
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -394,10 +394,10 @@
     MachineInstrBuilder MIB;
     if (StackAdj == 0) {
       MIB = BuildMI(MBB, MBBI, DL,
-                    TII->get(STI->is64Bit() ? X86::RETQ : X86::RETL));
+                    TII->get(STI->is64Bit() ? X86::RET64 : X86::RET32));
     } else if (isUInt<16>(StackAdj)) {
       MIB = BuildMI(MBB, MBBI, DL,
-                    TII->get(STI->is64Bit() ? X86::RETIQ : X86::RETIL))
+                    TII->get(STI->is64Bit() ? X86::RETI64 : X86::RETI32))
                 .addImm(StackAdj);
     } else {
       assert(!STI->is64Bit() &&
@@ -407,7 +407,7 @@
       BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r)).addReg(X86::ECX, RegState::Define);
       X86FL->emitSPUpdate(MBB, MBBI, DL, StackAdj, /*InEpilogue=*/true);
       BuildMI(MBB, MBBI, DL, TII->get(X86::PUSH32r)).addReg(X86::ECX);
-      MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
+      MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RET32));
     }
     for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
       MIB.add(MBBI->getOperand(I));
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1304,11 +1304,11 @@
   MachineInstrBuilder MIB;
   if (X86MFInfo->getBytesToPopOnReturn()) {
     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                  TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
+                  TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32))
               .addImm(X86MFInfo->getBytesToPopOnReturn());
   } else {
     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                  TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+                  TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32));
   }
   for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
     MIB.addReg(RetRegs[i], RegState::Implicit);
diff --git a/llvm/lib/Target/X86/X86IndirectThunks.cpp b/llvm/lib/Target/X86/X86IndirectThunks.cpp
--- a/llvm/lib/Target/X86/X86IndirectThunks.cpp
+++ b/llvm/lib/Target/X86/X86IndirectThunks.cpp
@@ -212,7 +212,7 @@
   MF.push_back(CallTarget);

   const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
-  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;
+  const unsigned RetOpc = Is64Bit ? X86::RET64 : X86::RET32;

   Entry->addLiveIn(ThunkReg);
   BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addSym(TargetSym);
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -20,30 +20,30 @@
 // ST1 arguments when returning values on the x87 stack.
 let isTerminator = 1, isReturn = 1, isBarrier = 1,
     hasCtrlDep = 1, FPForm = SpecialFP, SchedRW = [WriteJumpLd] in {
-  def RETL   : I    <0xC3, RawFrm, (outs), (ins variable_ops),
+  def RET32  : I    <0xC3, RawFrm, (outs), (ins variable_ops),
                      "ret{l}", []>, OpSize32, Requires<[Not64BitMode]>;
-  def RETQ   : I    <0xC3, RawFrm, (outs), (ins variable_ops),
+  def RET64  : I    <0xC3, RawFrm, (outs), (ins variable_ops),
                      "ret{q}", []>, OpSize32, Requires<[In64BitMode]>;
-  def RETW   : I    <0xC3, RawFrm, (outs), (ins),
+  def RET16  : I    <0xC3, RawFrm, (outs), (ins),
                      "ret{w}", []>, OpSize16;
-  def RETIL  : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+  def RETI32 : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
                     "ret{l}\t$amt", []>, OpSize32, Requires<[Not64BitMode]>;
-  def RETIQ  : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+  def RETI64 : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
                     "ret{q}\t$amt", []>, OpSize32, Requires<[In64BitMode]>;
-  def RETIW  : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
+  def RETI16 : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
                     "ret{w}\t$amt", []>, OpSize16;
-  def LRETL  : I   <0xCB, RawFrm, (outs), (ins),
+  def LRET32 : I   <0xCB, RawFrm, (outs), (ins),
                     "{l}ret{l|f}", []>, OpSize32;
-  def LRETQ  : RI  <0xCB, RawFrm, (outs), (ins),
+  def LRET64 : RI  <0xCB, RawFrm, (outs), (ins),
                     "{l}ret{|f}q", []>, Requires<[In64BitMode]>;
-  def LRETW  : I   <0xCB, RawFrm, (outs), (ins),
+  def LRET16 : I   <0xCB, RawFrm, (outs), (ins),
                     "{l}ret{w|f}", []>, OpSize16;
-  def LRETIL : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
-                    "{l}ret{l|f}\t$amt", []>, OpSize32;
-  def LRETIQ : RIi16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
-                     "{l}ret{|f}q\t$amt", []>, Requires<[In64BitMode]>;
-  def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
-                    "{l}ret{w|f}\t$amt", []>, OpSize16;
+  def LRETI32 : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+                     "{l}ret{l|f}\t$amt", []>, OpSize32;
+  def LRETI64 : RIi16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+                      "{l}ret{|f}q\t$amt", []>, Requires<[In64BitMode]>;
+  def LRETI16 : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+                     "{l}ret{w|f}\t$amt", []>, OpSize16;

   // The machine return from interrupt instruction, but sometimes we need to
   // perform a post-epilogue stack adjustment. Codegen emits the pseudo form
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -82,7 +82,7 @@
                       (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                                : X86::ADJCALLSTACKUP32),
                       X86::CATCHRET,
-                      (STI.is64Bit() ? X86::RETQ : X86::RETL)),
+                      (STI.is64Bit() ? X86::RET64 : X86::RET32)),
       Subtarget(STI), RI(STI.getTargetTriple()) {
 }
@@ -9363,7 +9363,7 @@
     // We're a normal call, so our sequence doesn't have a return instruction.
     // Add it in.
-    MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
+    MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RET64));
     MBB.insert(MBB.end(), retq);
   }
diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
--- a/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
@@ -76,7 +76,7 @@
   bool Modified = false;
   for (auto &MBB : MF) {
     for (auto MBBI = MBB.begin(); MBBI != MBB.end(); ++MBBI) {
-      if (MBBI->getOpcode() != X86::RETQ)
+      if (MBBI->getOpcode() != X86::RET64)
         continue;

       unsigned ClobberReg = TRI->findDeadCallerSavedReg(MBB, MBBI);
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -421,7 +421,7 @@
 }

 static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
-  return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
+  return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
 }

 Optional<MCOperand>
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -816,10 +816,10 @@
     return 0;
   case TargetOpcode::PATCHABLE_RET:
   case X86::RET:
-  case X86::RETL:
-  case X86::RETQ:
-  case X86::RETIL:
-  case X86::RETIQ:
+  case X86::RET32:
+  case X86::RET64:
+  case X86::RETI32:
+  case X86::RETI64:
   case X86::TCRETURNdi:
   case X86::TCRETURNri:
   case X86::TCRETURNmi:
diff --git a/llvm/lib/Target/X86/X86SchedBroadwell.td b/llvm/lib/Target/X86/X86SchedBroadwell.td
--- a/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/llvm/lib/Target/X86/X86SchedBroadwell.td
@@ -1110,7 +1110,7 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,1,1];
 }
-def: InstRW<[BWWriteResGroup84], (instrs LRETQ, RETQ)>;
+def: InstRW<[BWWriteResGroup84], (instrs LRET64, RET64)>;

 def BWWriteResGroup87 : SchedWriteRes<[BWPort4,BWPort23,BWPort237,BWPort06]> {
   let Latency = 7;
diff --git a/llvm/lib/Target/X86/X86SchedHaswell.td b/llvm/lib/Target/X86/X86SchedHaswell.td
--- a/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -710,7 +710,7 @@
   let NumMicroOps = 4;
   let ResourceCycles = [1, 2, 1];
 }
-def : InstRW<[HWWriteRETI], (instregex "RETI(L|Q|W)", "LRETI(L|Q|W)")>;
+def : InstRW<[HWWriteRETI], (instregex "RETI(16|32|64)", "LRETI(16|32|64)")>;

 // BOUND.
 // r,m.
@@ -1188,7 +1188,7 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,1,1];
 }
-def: InstRW<[HWWriteResGroup41], (instrs LRETQ, RETL, RETQ)>;
+def: InstRW<[HWWriteResGroup41], (instrs LRET64, RET32, RET64)>;

 def HWWriteResGroup44 : SchedWriteRes<[HWPort4,HWPort6,HWPort237,HWPort0156]> {
   let Latency = 3;
diff --git a/llvm/lib/Target/X86/X86SchedIceLake.td b/llvm/lib/Target/X86/X86SchedIceLake.td
--- a/llvm/lib/Target/X86/X86SchedIceLake.td
+++ b/llvm/lib/Target/X86/X86SchedIceLake.td
@@ -1444,7 +1444,7 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,1,1];
 }
-def: InstRW<[ICXWriteResGroup104], (instrs LRETQ, RETQ)>;
+def: InstRW<[ICXWriteResGroup104], (instrs LRET64, RET64)>;

 def ICXWriteResGroup106 : SchedWriteRes<[ICXPort4,ICXPort5,ICXPort237]> {
   let Latency = 7;
diff --git a/llvm/lib/Target/X86/X86SchedSandyBridge.td b/llvm/lib/Target/X86/X86SchedSandyBridge.td
--- a/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -606,7 +606,7 @@
 def: InstRW<[SBWriteResGroup2], (instrs FDECSTP, FINCSTP, FFREE, FFREEP, FNOP,
                                         LD_Frr, ST_Frr, ST_FPrr)>;
 def: InstRW<[SBWriteResGroup2], (instrs LOOP, LOOPE, LOOPNE)>;
 // FIXME: This seems wrong compared to other Intel CPUs.
-def: InstRW<[SBWriteResGroup2], (instrs RETQ)>;
+def: InstRW<[SBWriteResGroup2], (instrs RET64)>;

 def SBWriteResGroup4 : SchedWriteRes<[SBPort05]> {
   let Latency = 1;
diff --git a/llvm/lib/Target/X86/X86SchedSkylakeClient.td b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
--- a/llvm/lib/Target/X86/X86SchedSkylakeClient.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
@@ -1175,7 +1175,7 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,1,1];
 }
-def: InstRW<[SKLWriteResGroup98], (instrs LRETQ, RETQ)>;
+def: InstRW<[SKLWriteResGroup98], (instrs LRET64, RET64)>;

 def SKLWriteResGroup100 : SchedWriteRes<[SKLPort4,SKLPort23,SKLPort237,SKLPort06]> {
   let Latency = 7;
diff --git a/llvm/lib/Target/X86/X86SchedSkylakeServer.td b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
--- a/llvm/lib/Target/X86/X86SchedSkylakeServer.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
@@ -1436,7 +1436,7 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,1,1];
 }
-def: InstRW<[SKXWriteResGroup104], (instrs LRETQ, RETQ)>;
+def: InstRW<[SKXWriteResGroup104], (instrs LRET64, RET64)>;

 def SKXWriteResGroup106 : SchedWriteRes<[SKXPort4,SKXPort5,SKXPort237]> {
   let Latency = 7;
diff --git a/llvm/lib/Target/X86/X86ScheduleAtom.td b/llvm/lib/Target/X86/X86ScheduleAtom.td
--- a/llvm/lib/Target/X86/X86ScheduleAtom.td
+++ b/llvm/lib/Target/X86/X86ScheduleAtom.td
@@ -540,7 +540,7 @@
                                        PUSH16rmr, PUSH32rmr, PUSH64rmr,
                                        PUSH16i8, PUSH32i8, PUSH64i8, PUSH64i32,
                                        XCH_F)>;
-def : InstRW<[AtomWrite0_1_1], (instregex "RETI(L|Q|W)$",
+def : InstRW<[AtomWrite0_1_1], (instregex "RETI(16|32|64)$",
                                           "IRET(16|32|64)?")>;

 def AtomWrite0_1_5 : SchedWriteRes<[AtomPort0, AtomPort1]> {
@@ -819,8 +819,8 @@
   let Latency = 79;
   let ResourceCycles = [79];
 }
-def : InstRW<[AtomWrite01_79], (instregex "RET(L|Q|W)?$",
-                                          "LRETI?(L|Q|W)")>;
+def : InstRW<[AtomWrite01_79], (instregex "RET(16|32|64)?$",
+                                          "LRETI?(16|32|64)")>;

 def AtomWrite01_92 : SchedWriteRes<[AtomPort01]> {
   let Latency = 92;
diff --git a/llvm/lib/Target/X86/X86ScheduleZnver1.td b/llvm/lib/Target/X86/X86ScheduleZnver1.td
--- a/llvm/lib/Target/X86/X86ScheduleZnver1.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver1.td
@@ -697,7 +697,7 @@
 def ZnWriteRET : SchedWriteRes<[ZnALU03]> {
   let NumMicroOps = 2;
 }
-def : InstRW<[ZnWriteRET], (instregex "RET(L|Q|W)", "LRET(L|Q|W)",
+def : InstRW<[ZnWriteRET], (instregex "RET(16|32|64)", "LRET(16|32|64)",
"RET(16|32|64)", "LRET(16|32|64)", "IRET(16|32|64)")>; //-- Logic instructions --// diff --git a/llvm/lib/Target/X86/X86ScheduleZnver2.td b/llvm/lib/Target/X86/X86ScheduleZnver2.td --- a/llvm/lib/Target/X86/X86ScheduleZnver2.td +++ b/llvm/lib/Target/X86/X86ScheduleZnver2.td @@ -697,7 +697,7 @@ def Zn2WriteRET : SchedWriteRes<[Zn2ALU03]> { let NumMicroOps = 2; } -def : InstRW<[Zn2WriteRET], (instregex "RET(L|Q|W)", "LRET(L|Q|W)", +def : InstRW<[Zn2WriteRET], (instregex "RET(16|32|64)", "LRET(16|32|64)", "IRET(16|32|64)")>; //-- Logic instructions --// diff --git a/llvm/unittests/tools/llvm-exegesis/X86/SnippetRepetitorTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/SnippetRepetitorTest.cpp --- a/llvm/unittests/tools/llvm-exegesis/X86/SnippetRepetitorTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/X86/SnippetRepetitorTest.cpp @@ -71,7 +71,7 @@ ASSERT_EQ(MF->getNumBlockIDs(), 1u); EXPECT_THAT(MF->getBlockNumbered(0)->instrs(), ElementsAre(HasOpcode(X86::NOOP), HasOpcode(X86::NOOP), - HasOpcode(X86::NOOP), HasOpcode(X86::RETQ))); + HasOpcode(X86::NOOP), HasOpcode(X86::RET64))); } TEST_F(X86SnippetRepetitorTest, Loop) { @@ -90,7 +90,7 @@ LiveReg(State.getExegesisTarget().getLoopCounterRegister( State.getTargetMachine().getTargetTriple())))); EXPECT_THAT(MF->getBlockNumbered(2)->instrs(), - ElementsAre(HasOpcode(X86::RETQ))); + ElementsAre(HasOpcode(X86::RET64))); } } // namespace