diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -82,6 +82,10 @@
   3 // IC_GE
 };
 
+#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
+#include "X86GenAsmMatcher.inc"
+
 class X86AsmParser : public MCTargetAsmParser {
   ParseInstructionInfo *InstInfo;
   bool Code16GCC;
@@ -1282,13 +1286,6 @@
 };
 } // end anonymous namespace
 
-/// @name Auto-generated Match Functions
-/// {
-
-static unsigned MatchRegisterName(StringRef Name);
-
-/// }
-
 static bool CheckBaseRegAndIndexRegAndScale(unsigned BaseReg, unsigned IndexReg,
                                             unsigned Scale, bool Is64BitMode,
                                             StringRef &ErrMsg) {
@@ -3935,8 +3932,6 @@
   return false;
 }
 
-static const char *getSubtargetFeatureName(uint64_t Val);
-
 void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
   Warning(Loc, "Instruction may be vulnerable to LVI and "
                "requires manual mitigation");
@@ -4192,7 +4187,13 @@
   }
   switch (OriginalError) {
   default: llvm_unreachable("Unexpected match result!");
-  case Match_Success:
+  case Match_Success: {
+    uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
+    bool HasImplicitIn64BitMode =
+        (TSFlags & X86II::REX_W) && !(TSFlags & X86II::EncodingMask);
+    if (!is64BitMode() && HasImplicitIn64BitMode)
+      return ErrorMissingFeature(IDLoc, FeatureBitset({Feature_In64BitModeBit}),
+                                 MatchingInlineAsm);
     if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
       return true;
     // Some instructions need post-processing to, for example, tweak which
@@ -4207,6 +4208,7 @@
     emitInstruction(Inst, Operands, Out);
     Opcode = Inst.getOpcode();
     return false;
+  }
   case Match_InvalidImmUnsignedi4: {
     SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
     if (ErrorLoc == SMLoc())
@@ -4991,7 +4993,5 @@
   RegisterMCAsmParser<X86AsmParser> Y(getTheX86_64Target());
 }
 
-#define GET_REGISTER_MATCHER
 #define GET_MATCHER_IMPLEMENTATION
-#define GET_SUBTARGET_FEATURE_NAME
 #include "X86GenAsmMatcher.inc"
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -595,16 +595,12 @@
 } // CodeSize = 1, hasSideEffects = 0
 } // Constraints = "$src1 = $dst", SchedRW
 
-let CodeSize = 2, SchedRW = [WriteALURMW] in {
-let Predicates = [UseIncDec] in {
+let CodeSize = 2, SchedRW = [WriteALURMW], Predicates = [UseIncDec] in {
 def INC8m : INCDECM;
 def INC16m : INCDECM;
 def INC32m : INCDECM;
-} // Predicates
-let Predicates = [UseIncDec, In64BitMode] in {
 def INC64m : INCDECM;
-} // Predicates
-} // CodeSize = 2, SchedRW
+} // CodeSize = 2, SchedRW, Predicates
 
 let Constraints = "$src1 = $dst", SchedRW = [WriteALU] in {
 let isConvertibleToThreeAddress = 1, CodeSize = 2 in { // Can xform into LEA.
@@ -621,16 +617,12 @@
 } // CodeSize = 1, hasSideEffects = 0
 } // Constraints = "$src1 = $dst", SchedRW
 
-let CodeSize = 2, SchedRW = [WriteALURMW] in {
-let Predicates = [UseIncDec] in {
+let CodeSize = 2, SchedRW = [WriteALURMW], Predicates = [UseIncDec] in {
 def DEC8m : INCDECM;
 def DEC16m : INCDECM;
 def DEC32m : INCDECM;
-} // Predicates
-let Predicates = [UseIncDec, In64BitMode] in {
 def DEC64m : INCDECM;
-} // Predicates
-} // CodeSize = 2, SchedRW
+} // CodeSize = 2, SchedRW, Predicates
 } // Defs = [EFLAGS]
 
 // Extra precision multiplication
@@ -672,8 +664,7 @@
 def MUL32m : MulOpM<0xF7, MRM4m, "mul", Xi32, WriteIMul32, []>;
 // RAX,RDX = RAX*[mem64]
 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
-def MUL64m : MulOpM<0xF7, MRM4m, "mul", Xi64, WriteIMul64, []>,
-             Requires<[In64BitMode]>;
+def MUL64m : MulOpM<0xF7, MRM4m, "mul", Xi64, WriteIMul64, []>;
 }
 
 let hasSideEffects = 0 in {
@@ -702,8 +693,7 @@
 def IMUL32m : MulOpM<0xF7, MRM5m, "imul", Xi32, WriteIMul32, []>;
 // RAX,RDX = RAX*[mem64]
 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
-def IMUL64m : MulOpM<0xF7, MRM5m, "imul", Xi64, WriteIMul64, []>,
-              Requires<[In64BitMode]>;
+def IMUL64m : MulOpM<0xF7, MRM5m, "imul", Xi64, WriteIMul64, []>;
 }
 
 let Defs = [EFLAGS] in {
@@ -784,8 +774,7 @@
 def DIV32m : MulOpM<0xF7, MRM6m, "div", Xi32, WriteDiv32, []>;
 // RDX:RAX/[mem64] = RAX,RDX
 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
-def DIV64m : MulOpM<0xF7, MRM6m, "div", Xi64, WriteDiv64, []>,
-             Requires<[In64BitMode]>;
+def DIV64m : MulOpM<0xF7, MRM6m, "div", Xi64, WriteDiv64, []>;
 }
 
 // Signed division/remainder.
@@ -814,8 +803,7 @@
 def IDIV32m: MulOpM<0xF7, MRM7m, "idiv", Xi32, WriteIDiv32, []>;
 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in // RDX:RAX/[mem64] = RAX,RDX
 // RDX:RAX/[mem64] = RAX,RDX
-def IDIV64m: MulOpM<0xF7, MRM7m, "idiv", Xi64, WriteIDiv64, []>,
-             Requires<[In64BitMode]>;
+def IDIV64m: MulOpM<0xF7, MRM7m, "idiv", Xi64, WriteIDiv64, []>;
 }
 } // hasSideEffects = 0
 
@@ -838,7 +826,7 @@
 def NEG8m : NegOpM<0xF6, "neg", Xi8>;
 def NEG16m : NegOpM<0xF7, "neg", Xi16>;
 def NEG32m : NegOpM<0xF7, "neg", Xi32>;
-def NEG64m : NegOpM<0xF7, "neg", Xi64>, Requires<[In64BitMode]>;
+def NEG64m : NegOpM<0xF7, "neg", Xi64>;
 } // SchedRW
 } // Defs = [EFLAGS]
 
@@ -856,7 +844,7 @@
 def NOT8m : NotOpM<0xF6, "not", Xi8>;
 def NOT16m : NotOpM<0xF7, "not", Xi16>;
 def NOT32m : NotOpM<0xF7, "not", Xi32>;
-def NOT64m : NotOpM<0xF7, "not", Xi64>, Requires<[In64BitMode]>;
+def NOT64m : NotOpM<0xF7, "not", Xi64>;
 } // SchedRW
 } // CodeSize
 
@@ -917,13 +905,11 @@
    // first so that they are slightly preferred to the mi forms.
    def NAME#16mi8 : BinOpMI8_RMW<mnemonic, Xi16, opnode, MemMRM>;
    def NAME#32mi8 : BinOpMI8_RMW<mnemonic, Xi32, opnode, MemMRM>;
-   let Predicates = [In64BitMode] in
    def NAME#64mi8 : BinOpMI8_RMW<mnemonic, Xi64, opnode, MemMRM>;
 
    def NAME#8mi : BinOpMI_RMW<0x80, mnemonic, Xi8 , opnode, MemMRM>;
    def NAME#16mi : BinOpMI_RMW<0x80, mnemonic, Xi16, opnode, MemMRM>;
    def NAME#32mi : BinOpMI_RMW<0x80, mnemonic, Xi32, opnode, MemMRM>;
-   let Predicates = [In64BitMode] in
    def NAME#64mi32 : BinOpMI_RMW<0x80, mnemonic, Xi64, opnode, MemMRM>;
 
    // These are for the disassembler since 0x82 opcode behaves like 0x80, but
@@ -1002,13 +988,11 @@
    // first so that they are slightly preferred to the mi forms.
    def NAME#16mi8 : BinOpMI8_RMW_FF<mnemonic, Xi16, opnode, MemMRM>;
    def NAME#32mi8 : BinOpMI8_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
-   let Predicates = [In64BitMode] in
    def NAME#64mi8 : BinOpMI8_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
 
    def NAME#8mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi8 , opnode, MemMRM>;
    def NAME#16mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi16, opnode, MemMRM>;
    def NAME#32mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi32, opnode, MemMRM>;
-   let Predicates = [In64BitMode] in
    def NAME#64mi32 : BinOpMI_RMW_FF<0x80, mnemonic, Xi64, opnode, MemMRM>;
 
    // These are for the disassembler since 0x82 opcode behaves like 0x80, but
@@ -1083,13 +1067,11 @@
    // first so that they are slightly preferred to the mi forms.
    def NAME#16mi8 : BinOpMI8_F<mnemonic, Xi16, opnode, MemMRM>;
    def NAME#32mi8 : BinOpMI8_F<mnemonic, Xi32, opnode, MemMRM>;
-   let Predicates = [In64BitMode] in
    def NAME#64mi8 : BinOpMI8_F<mnemonic, Xi64, opnode, MemMRM>;
 
    def NAME#8mi : BinOpMI_F<0x80, mnemonic, Xi8 , opnode, MemMRM>;
    def NAME#16mi : BinOpMI_F<0x80, mnemonic, Xi16, opnode, MemMRM>;
    def NAME#32mi : BinOpMI_F<0x80, mnemonic, Xi32, opnode, MemMRM>;
-   let Predicates = [In64BitMode] in
    def NAME#64mi32 : BinOpMI_F<0x80, mnemonic, Xi64, opnode, MemMRM>;
 
    // These are for the disassembler since 0x82 opcode behaves like 0x80, but
@@ -1345,7 +1327,6 @@
   def TEST8mi : BinOpMI_F<0xF6, "test", Xi8 , X86testpat, MRM0m>;
   def TEST16mi : BinOpMI_F<0xF6, "test", Xi16, X86testpat, MRM0m>;
   def TEST32mi : BinOpMI_F<0xF6, "test", Xi32, X86testpat, MRM0m>;
-  let Predicates = [In64BitMode] in
   def TEST64mi32 : BinOpMI_F<0xF6, "test", Xi64, X86testpat, MRM0m>;
 } // Defs = [EFLAGS]
 
diff --git a/llvm/test/MC/X86/w-bit.s b/llvm/test/MC/X86/w-bit.s
new file mode 100644
--- /dev/null
+++ b/llvm/test/MC/X86/w-bit.s
@@ -0,0 +1,18 @@
+# RUN: not llvm-mc --show-encoding -triple=i386 %s 2>&1 | FileCheck %s
+
+# CHECK: error: instruction requires: 64-bit mode
+# CHECK: incq 0
+incq 0
+
+# CHECK: error: instruction requires: 64-bit mode
+# CHECK: decq 0
+decq 0
+
+# CHECK: vpmaskmovq %ymm4, %ymm4, (%edx)
+# CHECK: encoding: [0xc4,0xe2,0xdd,0x8e,0x22]
+vpmaskmovq %ymm4, %ymm4, (%edx)
+
+
+# CHECK: vunpcklpd %zmm2, %zmm2, %zmm2 {%k2} {z}
+# CHECK: encoding: [0x62,0xf1,0xed,0xca,0x14,0xd2]
+vunpcklpd %zmm2, %zmm2, %zmm2 {%k2} {z}