Index: lib/Target/X86/AsmParser/X86AsmParser.cpp =================================================================== --- lib/Target/X86/AsmParser/X86AsmParser.cpp +++ lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -77,7 +77,7 @@ SwitchMode(X86::Mode32Bit); unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo, matchingInlineAsm, VariantID); - if (Code16GCC) + if (Code16GCC) SwitchMode(X86::Mode16Bit); return rv; } @@ -722,7 +722,8 @@ CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier, - InlineAsmIdentifierInfo &Info); + InlineAsmIdentifierInfo &Info, + bool IsBracketedMemOperand = false); bool parseDirectiveEven(SMLoc L); bool ParseDirectiveWord(unsigned Size, SMLoc L); @@ -765,6 +766,9 @@ bool ParseZ(std::unique_ptr &Z, const SMLoc &StartLoc); + unsigned AdjustAVX512Mem(unsigned Size, OperandVector &Operands, + unsigned UnsizedMemOpIndex); + bool is64BitMode() const { // FIXME: Can tablegen auto-generate this? return getSTI().getFeatureBits()[X86::Mode64Bit]; @@ -1172,7 +1176,7 @@ std::unique_ptr X86AsmParser::CreateMemForInlineAsm( unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier, - InlineAsmIdentifierInfo &Info) { + InlineAsmIdentifierInfo &Info, bool IsBracketedMemOperand) { // If we found a decl other than a VarDecl, then assume it is a FuncDecl or // some other label reference. if (isa(Disp) && Info.OpDecl && !Info.IsVarDecl) { @@ -1201,6 +1205,12 @@ if (Size) InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start, /*Len=*/0, Size); + if (IsBracketedMemOperand) + // handle cases where size qualifier is absent, upon an indirect symbol reference - i.e. 
"[var]" + // set Size to zero to allow matching mechansim to try and find a better + // size qualifier then our initial guess, based on available variants of + // the given instruction + Size = 0; } } @@ -1487,7 +1497,8 @@ InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); return CreateMemForInlineAsm(SegReg, Disp, BaseReg, IndexReg, Scale, Start, - End, Size, SM.getSymName(), Info); + End, Size, SM.getSymName(), Info, + isParsingInlineAsm()); } // Inline assembly may use variable names with namespace alias qualifiers. @@ -2828,6 +2839,35 @@ return true; } +unsigned X86AsmParser::AdjustAVX512Mem(unsigned Size, OperandVector &Operands, + unsigned UnsizedMemOpIndex) { + assert(UnsizedMemOpIndex && "Unsized operand cannot be the alleged mnemonic"); + // Check for the existence of an AVX512 platform + if (!getSTI().getFeatureBits()[X86::FeatureAVX512]) + return 0; + switch (Size) { + // allow adjusting upon a (x|y|z)mm + case 512: + case 256: + case 128: + return Size; + // this is an allegadly broadcasting mem op adjustment, + // allow some more inquiring to validate it + case 64: + case 32: + break; + // currently - do not allow any other type of adjustment + default: + return 0; + } + // if it's indeed a broadcasting adjustment - next token must be a LCurly + if (!(UnsizedMemOpIndex < Operands.size() - 1)) + return 0; + const X86Operand &Op = + static_cast(*Operands[UnsizedMemOpIndex + 1]); + return Op.isToken() && Op.getToken().substr(0, 4).equals("{1to") ? Size : 0; +} + bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, @@ -2847,10 +2887,14 @@ // Find one unsized memory operand, if present. 
X86Operand *UnsizedMemOp = nullptr; + unsigned UnsizedMemOpIndex = 0; for (const auto &Op : Operands) { X86Operand *X86Op = static_cast(Op.get()); - if (X86Op->isMemUnsized()) + if (X86Op->isMemUnsized()) { UnsizedMemOp = X86Op; + break; + } + UnsizedMemOpIndex++; } // Allow some instructions to have implicitly pointer-sized operands. This is @@ -2896,6 +2940,7 @@ // If an unsized memory operand is present, try to match with each memory // operand size. In Intel assembly, the size is not part of the instruction // mnemonic. + unsigned MatchedSize = 0; if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) { static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512}; for (unsigned Size : MopSizes) { @@ -2907,9 +2952,13 @@ if (Match.empty() || LastOpcode != Inst.getOpcode()) Match.push_back(M); - // If this returned as a missing feature failure, remember that. - if (Match.back() == Match_MissingFeature) - ErrorInfoMissingFeature = ErrorInfoIgnore; + // If this returned as a missing feature failure, remember that. + if (Match.back() == Match_MissingFeature) + ErrorInfoMissingFeature = ErrorInfoIgnore; + if (M == Match_Success) + // Allow a degenerate span of SIMD related adjustments, to match (G|I)CC + // behavior on respective cases + MatchedSize = AdjustAVX512Mem(Size, Operands, UnsizedMemOpIndex); } // Restore the size of the unsized memory operand if we modified it. 
@@ -2944,6 +2993,13 @@ unsigned NumSuccessfulMatches = std::count(std::begin(Match), std::end(Match), Match_Success); if (NumSuccessfulMatches == 1) { + if (MatchedSize && isParsingInlineAsm() && isParsingIntelSyntax()) + // MS compatibility - fix the rewrite according to the matched memory size + // We need to perform the fix online for MS inline assembly + for (AsmRewrite &AR : *InstInfo->AsmRewrites) + if ((AR.Loc.getPointer() == UnsizedMemOp->StartLoc.getPointer()) && + (AR.Kind == AOK_SizeDirective)) + AR.Val = MatchedSize; // Some instructions need post-processing to, for example, tweak which // encoding is selected. Loop on it while changes happen so the individual // transformations can chain off each other.