diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -124,12 +124,12 @@
                              bool matchingInlineAsm, unsigned VariantID = 0) {
     // In Code16GCC mode, match as 32-bit.
     if (Code16GCC)
-      SwitchMode(X86::Mode32Bit);
+      SwitchMode(X86::Is32Bit);
     unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                        MissingFeatures, matchingInlineAsm,
                                        VariantID);
     if (Code16GCC)
-      SwitchMode(X86::Mode16Bit);
+      SwitchMode(X86::Is16Bit);
     return rv;
   }
@@ -1193,19 +1193,19 @@
   bool is64BitMode() const {
     // FIXME: Can tablegen auto-generate this?
-    return getSTI().getFeatureBits()[X86::Mode64Bit];
+    return getSTI().getFeatureBits()[X86::Is64Bit];
   }
   bool is32BitMode() const {
     // FIXME: Can tablegen auto-generate this?
-    return getSTI().getFeatureBits()[X86::Mode32Bit];
+    return getSTI().getFeatureBits()[X86::Is32Bit];
   }
   bool is16BitMode() const {
     // FIXME: Can tablegen auto-generate this?
-    return getSTI().getFeatureBits()[X86::Mode16Bit];
+    return getSTI().getFeatureBits()[X86::Is16Bit];
   }
   void SwitchMode(unsigned mode) {
     MCSubtargetInfo &STI = copySTI();
-    FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
+    FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});
     FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
     FeatureBitset FB = ComputeAvailableFeatures(
         STI.ToggleFeature(OldMode.flip(mode)));
@@ -3346,7 +3346,7 @@
       Name = Next;
       PatchedName = Name;
-      ForcedDataPrefix = X86::Mode32Bit;
+      ForcedDataPrefix = X86::Is32Bit;
       IsPrefix = false;
     }
   }
@@ -4313,15 +4313,15 @@
   // In 16-bit mode, if data32 is specified, temporarily switch to 32-bit mode
   // when matching the instruction.
-  if (ForcedDataPrefix == X86::Mode32Bit)
-    SwitchMode(X86::Mode32Bit);
+  if (ForcedDataPrefix == X86::Is32Bit)
+    SwitchMode(X86::Is32Bit);
 
   // First, try a direct match.
   FeatureBitset MissingFeatures;
   unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
                                             MissingFeatures, MatchingInlineAsm,
                                             isParsingIntelSyntax());
-  if (ForcedDataPrefix == X86::Mode32Bit) {
-    SwitchMode(X86::Mode16Bit);
+  if (ForcedDataPrefix == X86::Is32Bit) {
+    SwitchMode(X86::Is16Bit);
     ForcedDataPrefix = 0;
   }
   switch (OriginalError) {
@@ -4886,7 +4886,7 @@
   if (IDVal == ".code16") {
     Parser.Lex();
     if (!is16BitMode()) {
-      SwitchMode(X86::Mode16Bit);
+      SwitchMode(X86::Is16Bit);
       getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
     }
   } else if (IDVal == ".code16gcc") {
@@ -4894,19 +4894,19 @@
     Parser.Lex();
     Code16GCC = true;
     if (!is16BitMode()) {
-      SwitchMode(X86::Mode16Bit);
+      SwitchMode(X86::Is16Bit);
       getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
     }
   } else if (IDVal == ".code32") {
     Parser.Lex();
     if (!is32BitMode()) {
-      SwitchMode(X86::Mode32Bit);
+      SwitchMode(X86::Is32Bit);
       getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
     }
   } else if (IDVal == ".code64") {
     Parser.Lex();
     if (!is64BitMode()) {
-      SwitchMode(X86::Mode64Bit);
+      SwitchMode(X86::Is64Bit);
       getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
     }
   } else {
diff --git a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
--- a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -1722,13 +1722,13 @@
                                          std::unique_ptr<const MCInstrInfo> MII)
     : MCDisassembler(STI, Ctx), MII(std::move(MII)) {
   const FeatureBitset &FB = STI.getFeatureBits();
-  if (FB[X86::Mode16Bit]) {
+  if (FB[X86::Is16Bit]) {
     fMode = MODE_16BIT;
     return;
-  } else if (FB[X86::Mode32Bit]) {
+  } else if (FB[X86::Is32Bit]) {
     fMode = MODE_32BIT;
     return;
-  } else if (FB[X86::Mode64Bit]) {
+  } else if (FB[X86::Is64Bit]) {
     fMode = MODE_64BIT;
     return;
   }
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
@@ -55,7 +55,7 @@
   // InstrInfo.td as soon as Requires clause is supported properly
   // for InstAlias.
   if (MI->getOpcode() == X86::CALLpcrel32 &&
-      (STI.getFeatureBits()[X86::Mode64Bit])) {
+      (STI.getFeatureBits()[X86::Is64Bit])) {
     OS << "\tcallq\t";
     printPCRelImm(MI, Address, 0, OS);
   }
@@ -65,8 +65,8 @@
   // 0x66 to be interpreted as "data16" by the asm printer.
   // Thus we add an adjustment here in order to print the "right" instruction.
   else if (MI->getOpcode() == X86::DATA16_PREFIX &&
-           STI.getFeatureBits()[X86::Mode16Bit]) {
-    OS << "\tdata32";
+           STI.getFeatureBits()[X86::Is16Bit]) {
+    OS << "\tdata32";
   }
   // Try to print any aliases first.
   else if (!printAliasInstr(MI, Address, OS) && !printVecCompareInstr(MI, OS))
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -293,7 +293,7 @@
 /// - If the instruction has a ESP/EBP base register, use SS.
 /// - Otherwise use DS.
 uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
-  assert((STI.hasFeature(X86::Mode32Bit) || STI.hasFeature(X86::Mode64Bit)) &&
+  assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
          "Prefixes can be added only in 32-bit or 64-bit mode.");
   const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
   uint64_t TSFlags = Desc.TSFlags;
@@ -334,7 +334,7 @@
   if (SegmentReg != 0)
     return X86::getSegmentOverridePrefixForReg(SegmentReg);
 
-  if (STI.hasFeature(X86::Mode64Bit))
+  if (STI.hasFeature(X86::Is64Bit))
     return X86::CS_Encoding;
 
   if (MemoryOperand >= 0) {
@@ -493,7 +493,7 @@
     return false;
 
   // Branches only need to be aligned in 32-bit or 64-bit mode.
-  if (!(STI.hasFeature(X86::Mode64Bit) || STI.hasFeature(X86::Mode32Bit)))
+  if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
     return false;
 
   return true;
@@ -755,7 +755,7 @@
 void X86AsmBackend::relaxInstruction(MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
   // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
-  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+  bool Is16BitMode = STI.getFeatureBits()[X86::Is16Bit];
   unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
 
   if (RelaxedOp == Inst.getOpcode()) {
@@ -774,7 +774,7 @@
 static bool isFullyRelaxed(const MCRelaxableFragment &RF) {
   auto &Inst = RF.getInst();
   auto &STI = *RF.getSubtargetInfo();
-  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+  bool Is16BitMode = STI.getFeatureBits()[X86::Is16Bit];
   return getRelaxedOpcode(Inst, Is16BitMode) == Inst.getOpcode();
 }
@@ -998,9 +998,9 @@
 }
 
 unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
-  if (STI.hasFeature(X86::Mode16Bit))
+  if (STI.hasFeature(X86::Is16Bit))
     return 4;
-  if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Mode64Bit))
+  if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
     return 1;
   if (STI.getFeatureBits()[X86::TuningFast7ByteNOP])
     return 7;
@@ -1055,7 +1055,7 @@
   };
 
   const char(*Nops)[11] =
-      STI->getFeatureBits()[X86::Mode16Bit] ? Nops16Bit : Nops32Bit;
+      STI->getFeatureBits()[X86::Is16Bit] ? Nops16Bit : Nops32Bit;
 
   uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
@@ -390,9 +390,9 @@
   // Address-Size override prefix
   if (Flags & X86::IP_HAS_AD_SIZE &&
       !X86_MC::needsAddressSizeOverride(*MI, STI, MemoryOperand, TSFlags)) {
-    if (STI.hasFeature(X86::Mode16Bit) || STI.hasFeature(X86::Mode64Bit))
+    if (STI.hasFeature(X86::Is16Bit) || STI.hasFeature(X86::Is64Bit))
       O << "\taddr32\t";
-    else if (STI.hasFeature(X86::Mode32Bit))
+    else if (STI.hasFeature(X86::Is32Bit))
       O << "\taddr16\t";
   }
 }
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
@@ -44,7 +44,7 @@
   // In 16-bit mode, print data16 as data32.
   if (MI->getOpcode() == X86::DATA16_PREFIX &&
-      STI.getFeatureBits()[X86::Mode16Bit]) {
+      STI.getFeatureBits()[X86::Is16Bit]) {
     OS << "\tdata32";
   } else if (!printAliasInstr(MI, Address, OS) &&
              !printVecCompareInstr(MI, OS))
     printInstruction(MI, Address, OS);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -333,7 +333,7 @@
   // Handle %rip relative addressing.
   if (BaseReg == X86::RIP ||
       BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
-    assert(STI.hasFeature(X86::Mode64Bit) &&
+    assert(STI.hasFeature(X86::Is64Bit) &&
           "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && !ForceSIB &&
           "Invalid rip-relative address");
@@ -482,7 +482,7 @@
       BaseRegNo != N86::ESP &&
       // If there is no base register and we're in 64-bit mode, we need a SIB
       // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
-      (!STI.hasFeature(X86::Mode64Bit) || BaseReg != 0)) {
+      (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {
     if (BaseReg == 0) { // [disp32] in X86-32 mode
       emitByte(modRMByte(0, RegOpcodeField, 5), OS);
@@ -1252,7 +1252,7 @@
   // Emit the operand size opcode prefix as needed.
   if ((TSFlags & X86II::OpSizeMask) ==
-      (STI.hasFeature(X86::Mode16Bit) ? X86II::OpSize32 : X86II::OpSize16))
+      (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
     emitByte(0x66, OS);
 
   // Emit the LOCK opcode prefix.
@@ -1276,9 +1276,9 @@
   }
 
   // Handle REX prefix.
-  assert((STI.hasFeature(X86::Mode64Bit) || !(TSFlags & X86II::REX_W)) &&
+  assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
          "REX.W requires 64bit mode.");
-  bool HasREX = STI.hasFeature(X86::Mode64Bit)
+  bool HasREX = STI.hasFeature(X86::Is64Bit)
                     ? emitREXPrefix(MemOperand, MI, STI, OS)
                     : false;
@@ -1377,7 +1377,7 @@
   case X86II::RawFrm:
     emitByte(BaseOpcode + OpcodeOffset, OS);
 
-    if (!STI.hasFeature(X86::Mode64Bit) || !isPCRel32Branch(MI, MCII))
+    if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
       break;
 
     const MCOperand &Op = MI.getOperand(CurOp++);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -86,7 +86,7 @@
   const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
   const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
 
-  if (STI.hasFeature(X86::Mode16Bit) && Base.isReg() && Base.getReg() == 0 &&
+  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && Base.getReg() == 0 &&
       Index.isReg() && Index.getReg() == 0)
     return true;
   return isMemOperand(MI, Op, X86::GR16RegClassID);
@@ -114,9 +114,9 @@
                                       const MCSubtargetInfo &STI,
                                       int MemoryOperand, uint64_t TSFlags) {
   uint64_t AdSize = TSFlags & X86II::AdSizeMask;
-  bool Is16BitMode = STI.hasFeature(X86::Mode16Bit);
-  bool Is32BitMode = STI.hasFeature(X86::Mode32Bit);
-  bool Is64BitMode = STI.hasFeature(X86::Mode64Bit);
+  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
+  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
+  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
   if ((Is16BitMode && AdSize == X86II::AdSize32) ||
       (Is32BitMode && AdSize == X86II::AdSize16) ||
       (Is64BitMode && AdSize == X86II::AdSize32))
@@ -150,15 +150,15 @@
   if (MemoryOperand < 0)
     return false;
 
-  if (STI.hasFeature(X86::Mode64Bit)) {
+  if (STI.hasFeature(X86::Is64Bit)) {
     assert(!is16BitMemOperand(MI, MemoryOperand, STI));
     return is32BitMemOperand(MI, MemoryOperand);
   }
-  if (STI.hasFeature(X86::Mode32Bit)) {
+  if (STI.hasFeature(X86::Is32Bit)) {
     assert(!is64BitMemOperand(MI, MemoryOperand));
     return is16BitMemOperand(MI, MemoryOperand, STI);
   }
-  assert(STI.hasFeature(X86::Mode16Bit));
+  assert(STI.hasFeature(X86::Is16Bit));
   assert(!is64BitMemOperand(MI, MemoryOperand));
   return !is16BitMemOperand(MI, MemoryOperand, STI);
 }
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -19,12 +19,12 @@
 // X86 Subtarget state
 //
 
-def Mode64Bit : SubtargetFeature<"64bit-mode", "Is64Bit", "true",
-                                 "64-bit mode (x86_64)">;
-def Mode32Bit : SubtargetFeature<"32bit-mode", "Is32Bit", "true",
-                                 "32-bit mode (80386)">;
-def Mode16Bit : SubtargetFeature<"16bit-mode", "Is16Bit", "true",
-                                 "16-bit mode (i8086)">;
+def Is64Bit : SubtargetFeature<"64bit-mode", "Is64Bit", "true",
+                               "64-bit mode (x86_64)">;
+def Is32Bit : SubtargetFeature<"32bit-mode", "Is32Bit", "true",
+                               "32-bit mode (80386)">;
+def Is16Bit : SubtargetFeature<"16bit-mode", "Is16Bit", "true",
+                               "16-bit mode (i8086)">;
 
 //===----------------------------------------------------------------------===//
 // X86 Subtarget ISA features
@@ -36,10 +36,10 @@
 def FeatureNOPL : SubtargetFeature<"nopl", "HasNOPL", "true",
                                    "Enable NOPL instruction">;
 
-def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
+def FeatureCMOV : SubtargetFeature<"cmov","HasCMOV", "true",
                                    "Enable conditional move instructions">;
 
-def FeatureCMPXCHG8B : SubtargetFeature<"cx8", "HasCmpxchg8b", "true",
+def FeatureCMPXCHG8B : SubtargetFeature<"cx8", "HasCMPXCHG8B", "true",
                                         "Support CMPXCHG8B instructions">;
 
 def FeatureCRC32 : SubtargetFeature<"crc32", "HasCRC32", "true",
@@ -98,9 +98,9 @@
 // feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
 // without disabling 64-bit mode. Nothing should imply this feature bit. It
 // is used to enforce that only 64-bit capable CPUs are used in 64-bit mode.
-def Feature64Bit : SubtargetFeature<"64bit", "HasX86_64", "true",
+def FeatureX86_64 : SubtargetFeature<"64bit", "HasX86_64", "true",
                                     "Support 64-bit instructions">;
-def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
+def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCMPXCHG16B", "true",
                                          "64-bit with cmpxchg16b",
                                          [FeatureCMPXCHG8B]>;
 def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
@@ -234,7 +234,7 @@
                                     "Support PRFCHW instructions">;
 def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
                                      "Support RDSEED instruction">;
-def FeatureLAHFSAHF : SubtargetFeature<"sahf", "HasLAHFSAHF64", "true",
+def FeatureLAHFSAHF64 : SubtargetFeature<"sahf", "HasLAHFSAHF64", "true",
                                        "Support LAHF and SAHF instructions in 64-bit mode">;
 def FeatureMWAITX : SubtargetFeature<"mwaitx", "HasMWAITX", "true",
                                      "Enable MONITORX/MWAITX timer functionality">;
@@ -637,10 +637,10 @@
   // x86-64 and x86-64-v[234]
   list<SubtargetFeature> X86_64V1Features = [
     FeatureX87, FeatureCMPXCHG8B, FeatureCMOV, FeatureMMX, FeatureSSE2,
-    FeatureFXSR, FeatureNOPL, Feature64Bit
+    FeatureFXSR, FeatureNOPL, FeatureX86_64,
   ];
   list<SubtargetFeature> X86_64V2Features = !listconcat(X86_64V1Features, [
-    FeatureCMPXCHG16B, FeatureLAHFSAHF, FeatureCRC32, FeaturePOPCNT,
+    FeatureCMPXCHG16B, FeatureLAHFSAHF64, FeatureCRC32, FeaturePOPCNT,
     FeatureSSE42
   ]);
   list<SubtargetFeature> X86_64V3Features = !listconcat(X86_64V2Features, [
@@ -878,10 +878,10 @@
                                         FeatureSSSE3,
                                         FeatureFXSR,
                                         FeatureNOPL,
-                                        Feature64Bit,
+                                        FeatureX86_64,
                                         FeatureCMPXCHG16B,
                                         FeatureMOVBE,
-                                        FeatureLAHFSAHF];
+                                        FeatureLAHFSAHF64];
   list<SubtargetFeature> AtomTuning = [ProcIntelAtom,
                                        TuningSlowUAMem16,
                                        TuningLEAForSP,
@@ -983,14 +983,14 @@
                                        FeatureMMX,
                                        FeatureFXSR,
                                        FeatureNOPL,
-                                       Feature64Bit,
+                                       FeatureX86_64,
                                        FeatureCMPXCHG16B,
                                        FeatureCRC32,
                                        FeaturePOPCNT,
                                        FeaturePCLMUL,
                                        FeatureXSAVE,
                                        FeatureXSAVEOPT,
-                                       FeatureLAHFSAHF,
+                                       FeatureLAHFSAHF64,
                                        FeatureAES,
                                        FeatureRDRAND,
                                        FeatureF16C,
@@ -1031,9 +1031,9 @@
                                             FeaturePRFCHW,
                                             FeatureLZCNT,
                                             FeaturePOPCNT,
-                                            FeatureLAHFSAHF,
+                                            FeatureLAHFSAHF64,
                                             FeatureCMOV,
-                                            Feature64Bit];
+                                            FeatureX86_64];
   list<SubtargetFeature> BarcelonaTuning = [TuningFastScalarShiftMasks,
                                             TuningSlowSHLD,
                                             TuningSBBDepBreaking,
@@ -1048,12 +1048,12 @@
                                         FeatureSSE4A,
                                         FeatureFXSR,
                                         FeatureNOPL,
-                                        Feature64Bit,
+                                        FeatureX86_64,
                                         FeatureCMPXCHG16B,
                                         FeaturePRFCHW,
                                         FeatureLZCNT,
                                         FeaturePOPCNT,
-                                        FeatureLAHFSAHF];
+                                        FeatureLAHFSAHF64];
   list<SubtargetFeature> BtVer1Tuning = [TuningFast15ByteNOP,
                                          TuningFastScalarShiftMasks,
                                          TuningFastVectorShiftMasks,
@@ -1088,7 +1088,7 @@
                                         FeatureCMPXCHG8B,
                                         FeatureCMOV,
                                         FeatureXOP,
-                                        Feature64Bit,
+                                        FeatureX86_64,
                                         FeatureCMPXCHG16B,
                                         FeatureAES,
                                         FeatureCRC32,
@@ -1101,7 +1101,7 @@
                                         FeaturePOPCNT,
                                         FeatureXSAVE,
                                         FeatureLWP,
-                                        FeatureLAHFSAHF];
+                                        FeatureLAHFSAHF64];
   list<SubtargetFeature> BdVer1Tuning = [TuningSlowSHLD,
                                          TuningFast11ByteNOP,
                                          TuningFastScalarShiftMasks,
@@ -1148,7 +1148,7 @@
                                      FeatureCLFLUSHOPT,
                                      FeatureCLZERO,
                                      FeatureCMOV,
-                                     Feature64Bit,
+                                     FeatureX86_64,
                                      FeatureCMPXCHG16B,
                                      FeatureCRC32,
                                      FeatureF16C,
@@ -1156,7 +1156,7 @@
                                      FeatureFSGSBase,
                                      FeatureFXSR,
                                      FeatureNOPL,
-                                     FeatureLAHFSAHF,
+                                     FeatureLAHFSAHF64,
                                      FeatureLZCNT,
                                      FeatureMMX,
                                      FeatureMOVBE,
@@ -1220,13 +1220,13 @@
 // NOTE: CMPXCHG8B is here for legacy compatibility so that it is only disabled
 // if i386/i486 is specifically requested.
 // NOTE: 64Bit is here as "generic" is the default llc CPU. The X86Subtarget
-// constructor checks that any CPU used in 64-bit mode has Feature64Bit enabled.
-// It has no effect on code generation.
+// constructor checks that any CPU used in 64-bit mode has FeatureX86_64
+// enabled. It has no effect on code generation.
 // NOTE: As a default tuning, "generic" aims to produce code optimized for the
 // most common X86 processors. The tunings might be changed over time. It is
 // recommended to use "x86-64" in lit tests for consistency.
 def : ProcModel<"generic", SandyBridgeModel,
-                [FeatureX87, FeatureCMPXCHG8B, Feature64Bit],
+                [FeatureX87, FeatureCMPXCHG8B, FeatureX86_64],
                 [TuningSlow3OpsLEA,
                  TuningSlowDivide64,
                  TuningMacroFusion,
@@ -1306,7 +1306,7 @@
                   FeatureSSE3,
                   FeatureFXSR,
                   FeatureNOPL,
-                  Feature64Bit,
+                  FeatureX86_64,
                   FeatureCMPXCHG16B,
                 ],
                 [
@@ -1323,9 +1323,9 @@
                   FeatureSSSE3,
                   FeatureFXSR,
                   FeatureNOPL,
-                  Feature64Bit,
+                  FeatureX86_64,
                   FeatureCMPXCHG16B,
-                  FeatureLAHFSAHF
+                  FeatureLAHFSAHF64
                 ],
                 [
                   TuningMacroFusion,
@@ -1340,9 +1340,9 @@
                   FeatureSSE41,
                   FeatureFXSR,
                   FeatureNOPL,
-                  Feature64Bit,
+                  FeatureX86_64,
                   FeatureCMPXCHG16B,
-                  FeatureLAHFSAHF
+                  FeatureLAHFSAHF64
                 ],
                 [
                   TuningMacroFusion,
@@ -1452,7 +1452,7 @@
 foreach P = ["k8", "opteron", "athlon64", "athlon-fx"] in {
   def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureSSE2, Feature3DNowA,
-                 FeatureFXSR, FeatureNOPL, Feature64Bit],
+                 FeatureFXSR, FeatureNOPL, FeatureX86_64],
              [TuningFastScalarShiftMasks, TuningSlowSHLD, TuningSlowUAMem16,
               TuningSBBDepBreaking, TuningInsertVZEROUPPER]>;
 }
@@ -1460,7 +1460,7 @@
 foreach P = ["k8-sse3", "opteron-sse3", "athlon64-sse3"] in {
   def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureSSE3, Feature3DNowA,
-                 FeatureFXSR, FeatureNOPL, FeatureCMPXCHG16B, Feature64Bit],
+                 FeatureFXSR, FeatureNOPL, FeatureCMPXCHG16B, FeatureX86_64],
              [TuningFastScalarShiftMasks, TuningSlowSHLD, TuningSlowUAMem16,
               TuningSBBDepBreaking, TuningInsertVZEROUPPER]>;
 }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -170,7 +170,7 @@
   // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
   // FIXME: Should we be limiting the atomic size on other configs? Default is
   // 1024.
-  if (!Subtarget.hasCmpxchg8b())
+  if (!Subtarget.hasCMPXCHG8B())
     setMaxAtomicSizeInBitsSupported(32);
 
   // Set up the register classes.
@@ -516,7 +516,7 @@
   if (!Subtarget.is64Bit())
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
 
-  if (Subtarget.hasCmpxchg16b()) {
+  if (Subtarget.hasCMPXCHG16B()) {
     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
   }
@@ -30355,9 +30355,9 @@
   unsigned OpWidth = MemType->getPrimitiveSizeInBits();
 
   if (OpWidth == 64)
-    return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
+    return Subtarget.hasCMPXCHG8B() && !Subtarget.is64Bit();
   if (OpWidth == 128)
-    return Subtarget.hasCmpxchg16b();
+    return Subtarget.hasCMPXCHG16B();
 
   return false;
 }
@@ -32600,7 +32600,7 @@
   EVT T = N->getValueType(0);
   assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
   bool Regs64bit = T == MVT::i128;
-  assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
+  assert((!Regs64bit || Subtarget.hasCMPXCHG16B()) &&
          "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
   MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
   SDValue cpInL, cpInH;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -981,8 +981,8 @@
 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
-def HasCmpxchg8b : Predicate<"Subtarget->hasCmpxchg8b()">;
-def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
+def HasCmpxchg8b : Predicate<"Subtarget->hasCMPXCHG8B()">;
+def HasCmpxchg16b: Predicate<"Subtarget->hasCMPXCHG16B()">;
 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
 def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">;
 def HasKL : Predicate<"Subtarget->hasKL()">;
@@ -996,17 +996,17 @@
 def HasUINTR : Predicate<"Subtarget->hasUINTR()">;
 def HasCRC32 : Predicate<"Subtarget->hasCRC32()">;
 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
-                   AssemblerPredicate<(all_of (not Mode64Bit)), "Not 64-bit mode">;
+                   AssemblerPredicate<(all_of (not Is64Bit)), "Not 64-bit mode">;
 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
-                  AssemblerPredicate<(all_of Mode64Bit), "64-bit mode">;
+                  AssemblerPredicate<(all_of Is64Bit), "64-bit mode">;
 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
-                  AssemblerPredicate<(all_of Mode16Bit), "16-bit mode">;
+                  AssemblerPredicate<(all_of Is16Bit), "16-bit mode">;
 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
-                   AssemblerPredicate<(all_of (not Mode16Bit)), "Not 16-bit mode">;
+                   AssemblerPredicate<(all_of (not Is16Bit)), "Not 16-bit mode">;
 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
-                  AssemblerPredicate<(all_of Mode32Bit), "32-bit mode">;
+                  AssemblerPredicate<(all_of Is32Bit), "32-bit mode">;
 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -76,7 +76,7 @@
   bool HasX87 = false;
 
   /// True if the processor supports CMPXCHG8B.
-  bool HasCmpxchg8b = false;
+  bool HasCMPXCHG8B = false;
 
   /// True if this processor has NOPL instruction
   /// (generally pentium pro+).
@@ -84,7 +84,7 @@
 
   /// True if this processor has conditional move instructions
   /// (generally pentium pro+).
-  bool HasCMov = false;
+  bool HasCMOV = false;
 
   /// True if the processor supports X86-64 instructions.
   bool HasX86_64 = false;
@@ -227,7 +227,7 @@
   /// True if this processor has the CMPXCHG16B instruction;
   /// this is true for most x86-64 chips, but not the first AMD chips.
-  bool HasCmpxchg16b = false;
+  bool HasCMPXCHG16B = false;
 
   /// True if the LEA instruction should be used for adjusting
   /// the stack pointer. This is an optimization for Intel Atom processors.
@@ -632,11 +632,11 @@
   void setPICStyle(PICStyles::Style Style)  { PICStyle = Style; }
 
   bool hasX87() const { return HasX87; }
-  bool hasCmpxchg8b() const { return HasCmpxchg8b; }
+  bool hasCMPXCHG8B() const { return HasCMPXCHG8B; }
   bool hasNOPL() const { return HasNOPL; }
   // SSE codegen depends on cmovs, and all SSE1+ processors support them.
   // All 64-bit processors support cmov.
-  bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
+  bool hasCMov() const { return HasCMOV || X86SSELevel >= SSE1 || is64Bit(); }
   bool hasSSE1() const { return X86SSELevel >= SSE1; }
   bool hasSSE2() const { return X86SSELevel >= SSE2; }
   bool hasSSE3() const { return X86SSELevel >= SSE3; }
@@ -712,7 +712,7 @@
   bool isUnalignedMem16Slow() const { return IsUnalignedMem16Slow; }
   bool isUnalignedMem32Slow() const { return IsUnalignedMem32Slow; }
   bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
-  bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
+  bool hasCMPXCHG16B() const { return HasCMPXCHG16B && is64Bit(); }
   bool useLeaForSP() const { return UseLeaForSP; }
   bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
   bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -38,12 +38,12 @@
   const FeatureBitset InlineFeatureIgnoreList = {
       // This indicates the CPU is 64 bit capable not that we are in 64-bit
      // mode.
-      X86::Feature64Bit,
+      X86::FeatureX86_64,
 
      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCMPXCHG16B,
-      X86::FeatureLAHFSAHF,
+      X86::FeatureLAHFSAHF64,
 
      // Some older targets can be setup to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,