diff --git a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h --- a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h +++ b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h @@ -120,8 +120,6 @@ ENUM_ENTRY(IC_VEX_XS, 2, "requires VEX and the XS prefix") \ ENUM_ENTRY(IC_VEX_XD, 2, "requires VEX and the XD prefix") \ ENUM_ENTRY(IC_VEX_OPSIZE, 2, "requires VEX and the OpSize prefix") \ - ENUM_ENTRY(IC_64BIT_VEX_OPSIZE, 4, "requires 64-bit mode and VEX") \ - ENUM_ENTRY(IC_64BIT_VEX_OPSIZE_ADSIZE, 5, "requires 64-bit mode, VEX, and AdSize")\ ENUM_ENTRY(IC_VEX_W, 3, "requires VEX and the W prefix") \ ENUM_ENTRY(IC_VEX_W_XS, 4, "requires VEX, W, and XS prefix") \ ENUM_ENTRY(IC_VEX_W_XD, 4, "requires VEX, W, and XD prefix") \ diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -3997,7 +3997,10 @@ //===---------------------------------------------------------------------===// let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecMoveLS.XMM.MR] in { -let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in +// As VEX does not have separate instruction contexts for address size +// overrides, VMASKMOVDQU and VMASKMOVDQU64 would have a decode conflict. +// Prefer VMASKMOVDQU64. 
+let Uses = [EDI], Predicates = [HasAVX], isAsmParserOnly = 1 in def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), "maskmovdqu\t{$mask, $src|$src, $mask}", @@ -4008,32 +4011,16 @@ (ins VR128:$src, VR128:$mask), "maskmovdqu\t{$mask, $src|$src, $mask}", [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, - VEX, VEX_WIG, AdSize64; -let Uses = [EDI], Predicates = [HasAVX,In64BitMode] in -def VMASKMOVDQUX32 : VPDI<0xF7, MRMSrcReg, (outs), - (ins VR128:$src, VR128:$mask), "", - [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, - VEX, VEX_WIG, AdSize32 { - let AsmString = "addr32 vmaskmovdqu\t{$mask, $src|$src, $mask}"; - let AsmVariantName = "NonParsable"; -} + VEX, VEX_WIG; -let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in +let Uses = [EDI], Predicates = [UseSSE2] in def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), "maskmovdqu\t{$mask, $src|$src, $mask}", [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>; let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), "maskmovdqu\t{$mask, $src|$src, $mask}", - [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, - AdSize64; -let Uses = [EDI], Predicates = [UseSSE2,In64BitMode] in -def MASKMOVDQUX32 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), - "addr32 maskmovdqu\t{$mask, $src|$src, $mask}", - [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, - AdSize32 { - let AsmVariantName = "NonParsable"; -} + [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>; } // ExeDomain = SSEPackedInt diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -962,6 +962,12 @@ // These are not truly commutable so hide them from the default case. 
break; + case X86::MASKMOVDQU: + case X86::VMASKMOVDQU: + if (AsmPrinter.getSubtarget().is64Bit()) + OutMI.setFlags(X86::IP_HAS_AD_SIZE); + break; + default: { // If the instruction is a commutable arithmetic instruction we might be // able to commute the operands to get a 2 byte VEX prefix. diff --git a/llvm/lib/Target/X86/X86ScheduleBtVer2.td b/llvm/lib/Target/X86/X86ScheduleBtVer2.td --- a/llvm/lib/Target/X86/X86ScheduleBtVer2.td +++ b/llvm/lib/Target/X86/X86ScheduleBtVer2.td @@ -840,8 +840,8 @@ let ResourceCycles = [1, 1, 2, 2, 2, 16, 42]; let NumMicroOps = 63; } -def : InstRW<[JWriteMASKMOVDQU], (instrs MASKMOVDQU, MASKMOVDQU64, MASKMOVDQUX32, - VMASKMOVDQU, VMASKMOVDQU64, VMASKMOVDQUX32)>; +def : InstRW<[JWriteMASKMOVDQU], (instrs MASKMOVDQU, MASKMOVDQU64, + VMASKMOVDQU, VMASKMOVDQU64)>; /////////////////////////////////////////////////////////////////////////////// // SchedWriteVariant definitions. diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp --- a/llvm/utils/TableGen/X86DisassemblerTables.cpp +++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp @@ -105,8 +105,7 @@ case IC_64BIT_ADSIZE: return (noPrefix && inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE, noPrefix)); case IC_64BIT_OPSIZE_ADSIZE: - return (noPrefix && - inheritsFrom(child, IC_64BIT_VEX_OPSIZE_ADSIZE, noPrefix)); + return false; case IC_XD: return inheritsFrom(child, IC_64BIT_XD); case IC_XS: @@ -127,11 +126,10 @@ case IC_64BIT_OPSIZE: return inheritsFrom(child, IC_64BIT_REXW_OPSIZE) || (!AdSize64 && inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE)) || - (!AdSize64 && inheritsFrom(child, IC_64BIT_REXW_ADSIZE)) || - (!AdSize64 && inheritsFrom(child, IC_64BIT_VEX_OPSIZE_ADSIZE)); + (!AdSize64 && inheritsFrom(child, IC_64BIT_REXW_ADSIZE)); case IC_64BIT_XD: - return (inheritsFrom(child, IC_64BIT_REXW_XD) || - (!AdSize64 && inheritsFrom(child, IC_64BIT_XD_ADSIZE))); + return(inheritsFrom(child, IC_64BIT_REXW_XD) || + (!AdSize64 && 
inheritsFrom(child, IC_64BIT_XD_ADSIZE))); case IC_64BIT_XS: return(inheritsFrom(child, IC_64BIT_REXW_XS) || (!AdSize64 && inheritsFrom(child, IC_64BIT_XS_ADSIZE))); @@ -161,12 +159,7 @@ case IC_VEX_OPSIZE: return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE)) || (VEX_WIG && inheritsFrom(child, IC_VEX_W_OPSIZE)) || - (VEX_LIG && inheritsFrom(child, IC_VEX_L_OPSIZE)) || - inheritsFrom(child, IC_64BIT_VEX_OPSIZE); - case IC_64BIT_VEX_OPSIZE: - return inheritsFrom(child, IC_64BIT_VEX_OPSIZE_ADSIZE); - case IC_64BIT_VEX_OPSIZE_ADSIZE: - return false; + (VEX_LIG && inheritsFrom(child, IC_VEX_L_OPSIZE)); case IC_VEX_W: return VEX_LIG && inheritsFrom(child, IC_VEX_L_W); case IC_VEX_W_XS: @@ -888,9 +881,6 @@ if ((index & ATTR_EVEX) || (index & ATTR_VEX) || (index & ATTR_VEXL)) { if (index & ATTR_EVEX) o << "IC_EVEX"; - else if ((index & (ATTR_64BIT | ATTR_VEXL | ATTR_REXW | ATTR_OPSIZE)) == - (ATTR_64BIT | ATTR_OPSIZE)) - o << "IC_64BIT_VEX"; else o << "IC_VEX"; @@ -902,13 +892,9 @@ if (index & ATTR_REXW) o << "_W"; - if (index & ATTR_OPSIZE) { + if (index & ATTR_OPSIZE) o << "_OPSIZE"; - if ((index & (ATTR_64BIT | ATTR_EVEX | ATTR_VEX | ATTR_VEXL | - ATTR_REXW | ATTR_ADSIZE)) == - (ATTR_64BIT | ATTR_VEX | ATTR_ADSIZE)) - o << "_ADSIZE"; - } else if (index & ATTR_XD) + else if (index & ATTR_XD) o << "_XD"; else if (index & ATTR_XS) o << "_XS"; @@ -922,7 +908,8 @@ if (index & ATTR_EVEXB) o << "_B"; } - } else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XS)) + } + else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XS)) o << "IC_64BIT_REXW_XS"; else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XD)) o << "IC_64BIT_REXW_XD"; diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp --- a/llvm/utils/TableGen/X86RecognizableInstr.cpp +++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp @@ -310,11 +310,6 @@ insnContext = IC_VEX_L_OPSIZE; else if (OpPrefix == 
X86Local::PD && HasVEX_W) insnContext = IC_VEX_W_OPSIZE; - else if (OpPrefix == X86Local::PD && Is64Bit && - AdSize == X86Local::AdSize32) - insnContext = IC_64BIT_VEX_OPSIZE_ADSIZE; - else if (OpPrefix == X86Local::PD && Is64Bit) - insnContext = IC_64BIT_VEX_OPSIZE; else if (OpPrefix == X86Local::PD) insnContext = IC_VEX_OPSIZE; else if (HasVEX_L && OpPrefix == X86Local::XS)