diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp --- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -37,6 +37,210 @@ namespace { +enum PrefixKind { None, REX, XOP, VEX2, VEX3, EVEX }; + +static void emitByte(uint8_t C, raw_ostream &OS) { OS << static_cast<char>(C); } + +class X86OpcodePrefixHelper { + // REX (1 byte) + // +-----+ +------+ + // | 40H | | WRXB | + // +-----+ +------+ + + // XOP (3-byte) + // +-----+ +--------------+ +-------------------+ + // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp | + // +-----+ +--------------+ +-------------------+ + + // VEX2 (2 bytes) + // +-----+ +-------------------+ + // | C5h | | R | vvvv | L | pp | + // +-----+ +-------------------+ + + // VEX3 (3 bytes) + // +-----+ +--------------+ +-------------------+ + // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp | + // +-----+ +--------------+ +-------------------+ + + // VEX_R: opcode extension equivalent to REX.R in + // 1's complement (inverted) form + // + // 1: Same as REX_R=0 (must be 1 in 32-bit mode) + // 0: Same as REX_R=1 (64 bit mode only) + + // VEX_X: equivalent to REX.X, only used when a + // register is used for index in SIB Byte. + // + // 1: Same as REX.X=0 (must be 1 in 32-bit mode) + // 0: Same as REX.X=1 (64-bit mode only) + + // VEX_B: + // 1: Same as REX_B=0 (ignored in 32-bit mode) + // 0: Same as REX_B=1 (64 bit mode only) + + // VEX_W: opcode specific (use like REX.W, or used for + // opcode extension, or ignored, depending on the opcode byte) + + // VEX_5M (VEX m-mmmmm field): + // + // 0b00000: Reserved for future use + // 0b00001: implied 0F leading opcode + // 0b00010: implied 0F 38 leading opcode bytes + // 0b00011: implied 0F 3A leading opcode bytes + // 0b00100: Reserved for future use + // 0b00101: VEX MAP5 + // 0b00110: VEX MAP6 + // 0b00111-0b11111: Reserved for future use + // 0b01000: XOP map select - 08h instructions with imm byte + // 0b01001: XOP map select - 09h instructions with no imm byte + // 0b01010: XOP map select - 0Ah instructions with imm dword + + // VEX_4V (VEX vvvv field): a register specifier + // (in 1's complement form) or 1111 if unused.
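As a rough illustration of how these fields pack into the 2-byte VEX form described above (a standalone sketch, not part of the patch; `vaddps %xmm3, %xmm2, %xmm1`, i.e. VEX.128.0F.WIG 58 /r, is just a convenient example):

#include <cstdint>
#include <cstdio>

int main() {
  // Fields for vaddps %xmm3, %xmm2, %xmm1, packed the same way the
  // VEX2 case of X86OpcodePrefixHelper::emit() packs them.
  unsigned R = 0;      // ModRM.reg (%xmm1) is not an extended register
  unsigned VEX_4V = 2; // vvvv holds src1 = %xmm2 (encoding 2), stored inverted
  unsigned VEX_L = 0;  // 128-bit vector length
  unsigned VEX_PP = 0; // the 0F 58 opcode has no implied 66/F3/F2 prefix
  uint8_t Payload =
      ((~R) & 0x1) << 7 | ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP;
  std::printf("C5 %02X 58 CB\n", Payload); // prints "C5 E8 58 CB"
  return 0;
}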
+ + // VEX_PP: opcode extension providing equivalent + // functionality of a SIMD prefix + // 0b00: None + // 0b01: 66 + // 0b10: F3 + // 0b11: F2 + + // EVEX (4 bytes) + // +-----+ +--------------+ +-------------------+ +------------------------+ + // | 62h | | RXBR' | 00mm | | W | vvvv | 1 | pp | | z | L'L | b | v' | aaa | + // +-----+ +--------------+ +-------------------+ +------------------------+ + + // EVEX_L2/VEX_L (Vector Length): + // L2 L + // 0 0: scalar or 128-bit vector + // 0 1: 256-bit vector + // 1 0: 512-bit vector + +private: + unsigned W : 1; + unsigned R : 1; + unsigned X : 1; + unsigned B : 1; + unsigned VEX_4V : 4; + unsigned VEX_L : 1; + unsigned VEX_PP : 2; + unsigned VEX_5M : 5; + unsigned EVEX_R2 : 1; + unsigned EVEX_z : 1; + unsigned EVEX_L2 : 1; + unsigned EVEX_b : 1; + unsigned EVEX_V2 : 1; + unsigned EVEX_aaa : 3; + PrefixKind Kind = None; + const MCRegisterInfo &MRI; + + unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const { + return MRI.getEncodingValue(MI.getOperand(OpNum).getReg()); + } + + void setR(unsigned Encoding) { R = Encoding >> 3 & 1; } + void setR2(unsigned Encoding) { EVEX_R2 = Encoding >> 4 & 1; } + void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; } + void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; } + +public: + void setW(bool V) { W = V; } + void setR(const MCInst &MI, unsigned OpNum) { + setR(getRegEncoding(MI, OpNum)); + } + void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) { + X = getRegEncoding(MI, OpNum) >> Shift & 1; + } + void setB(const MCInst &MI, unsigned OpNum) { + B = getRegEncoding(MI, OpNum) >> 3 & 1; + } + void set4V(const MCInst &MI, unsigned OpNum) { + set4V(getRegEncoding(MI, OpNum)); + } + void setL(bool V) { VEX_L = V; } + void setPP(unsigned V) { VEX_PP = V; } + void set5M(unsigned V) { VEX_5M = V; } + void setR2(const MCInst &MI, unsigned OpNum) { + setR2(getRegEncoding(MI, OpNum)); + } + void setRR2(const MCInst &MI, unsigned OpNum) { + unsigned Encoding = getRegEncoding(MI, OpNum); + setR(Encoding); + setR2(Encoding); + } + void setZ(bool V) { EVEX_z = V; } + void setL2(bool V) { EVEX_L2 = V; } + void setEVEX_b(bool V) { EVEX_b = V; } + void setV2(const MCInst &MI, unsigned OpNum) { + setV2(getRegEncoding(MI, OpNum)); + } + void set4VV2(const MCInst &MI, unsigned OpNum) { + unsigned Encoding = getRegEncoding(MI, OpNum); + set4V(Encoding); + setV2(Encoding); + } + void setAAA(const MCInst &MI, unsigned OpNum) { + EVEX_aaa = getRegEncoding(MI, OpNum); + } + + X86OpcodePrefixHelper(const MCRegisterInfo &MRI) + : W(0), R(0), X(0), B(0), VEX_4V(0), VEX_L(0), VEX_PP(0), VEX_5M(0), + EVEX_R2(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0), EVEX_aaa(0), + MRI(MRI) {} + + void setLowerBound(PrefixKind K) { Kind = K; } + + PrefixKind determineOptimalKind() { + switch (Kind) { + case None: + Kind = (W | R | X | B) ? REX : None; + break; + case REX: + case XOP: + case VEX3: + case EVEX: + break; + case VEX2: + Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2; + break; + } + return Kind; + } + + void emit(raw_ostream &OS) const { + uint8_t FirstPayload = + ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5; + uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP; + switch (Kind) { + case None: + return; + case REX: + emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, OS); + return; + case VEX2: + emitByte(0xC5, OS); + emitByte(~R << 7 | LastPayload, OS); + return; + case VEX3: + case XOP: + emitByte(Kind == VEX3 ? 
0xC4 : 0x8F, OS); + emitByte(FirstPayload | VEX_5M, OS); + emitByte(W << 7 | LastPayload, OS); + return; + case EVEX: + assert((VEX_5M & 0x7) == VEX_5M && + "More than 3 significant bits in VEX.m-mmmm fields for EVEX!"); + emitByte(0x62, OS); + emitByte(FirstPayload | ((~EVEX_R2) & 0x1) << 4 | VEX_5M, OS); + emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | 1 << 2 | VEX_PP, OS); + emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 | + ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa, + OS); + return; + } + } +}; + class X86MCCodeEmitter : public MCCodeEmitter { const MCInstrInfo &MCII; MCContext &Ctx; @@ -60,12 +264,6 @@ unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const; - /// \param MI a single low-level machine instruction. - /// \param OpNum the operand #. - /// \returns true if the OpNumth operand of MI require a bit to be set in - /// REX prefix. - bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const; - void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize, MCFixupKind FixupKind, uint64_t StartByte, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const; @@ -77,25 +275,26 @@ raw_ostream &OS) const; void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField, - uint64_t TSFlags, bool HasREX, uint64_t StartByte, + uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI, bool ForceSIB = false) const; - bool emitPrefixImpl(unsigned &CurOp, const MCInst &MI, - const MCSubtargetInfo &STI, raw_ostream &OS) const; + PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI, + const MCSubtargetInfo &STI, raw_ostream &OS) const; - void emitVEXOpcodePrefix(int MemOperand, const MCInst &MI, - raw_ostream &OS) const; + PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI, + raw_ostream &OS) const; void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI, raw_ostream &OS) const; - bool emitOpcodePrefix(int MemOperand, const MCInst &MI, - const MCSubtargetInfo &STI, raw_ostream &OS) const; + PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI, + const MCSubtargetInfo &STI, + raw_ostream &OS) const; - bool emitREXPrefix(int MemOperand, const MCInst &MI, - const MCSubtargetInfo &STI, raw_ostream &OS) const; + PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI, + const MCSubtargetInfo &STI, raw_ostream &OS) const; }; } // end anonymous namespace @@ -105,8 +304,6 @@ return RM | (RegOpcode << 3) | (Mod << 6); } -static void emitByte(uint8_t C, raw_ostream &OS) { OS << static_cast<char>(C); } - static void emitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) { // Output the constant in little endian byte order. for (unsigned i = 0; i != Size; ++i) { @@ -218,15 +415,6 @@ return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg()); } -/// \param MI a single low-level machine instruction. -/// \param OpNum the operand #. -/// \returns true if the OpNumth operand of MI require a bit to be set in -/// REX prefix.
-bool X86MCCodeEmitter::isREXExtendedReg(const MCInst &MI, - unsigned OpNum) const { - return (getX86RegEncoding(MI, OpNum) >> 3) & 1; -} - void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size, MCFixupKind FixupKind, uint64_t StartByte, raw_ostream &OS, @@ -319,7 +507,7 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField, - uint64_t TSFlags, bool HasREX, + uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI, @@ -355,7 +543,7 @@ // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a // special case because COFF and Mach-O don't support ELF's more // flexible R_X86_64_REX_GOTPCRELX relaxation. - assert(HasREX); + assert(Kind == REX); return X86::reloc_riprel_4byte_movq_load; case X86::ADC32rm: case X86::ADD32rm: @@ -379,8 +567,8 @@ case X86::SBB64rm: case X86::SUB64rm: case X86::XOR64rm: - return HasREX ? X86::reloc_riprel_4byte_relax_rex - : X86::reloc_riprel_4byte_relax; + return Kind == REX ? X86::reloc_riprel_4byte_relax_rex - : X86::reloc_riprel_4byte_relax; + : X86::reloc_riprel_4byte_relax; } }(); @@ -592,10 +780,11 @@ /// Emit all instruction prefixes. /// -/// \returns true if REX prefix is used, otherwise returns false. +/// \returns one of REX, XOP, VEX2, VEX3, or EVEX if any of them is used, +/// otherwise returns None. -bool X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI, - const MCSubtargetInfo &STI, - raw_ostream &OS) const { +PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI, + const MCSubtargetInfo &STI, + raw_ostream &OS) const { uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags; // Determine where the memory operand starts, if present. int MemoryOperand = X86II::getMemoryOperandNo(TSFlags); @@ -648,154 +837,102 @@ // REX prefix is optional, but if used must be immediately before the opcode // Encoding type for this instruction. - uint64_t Encoding = TSFlags & X86II::EncodingMask; - bool HasREX = false; - if (Encoding) - emitVEXOpcodePrefix(MemoryOperand, MI, OS); - else - HasREX = emitOpcodePrefix(MemoryOperand, MI, STI, OS); - - return HasREX; + return (TSFlags & X86II::EncodingMask) + ? emitVEXOpcodePrefix(MemoryOperand, MI, OS) + : emitOpcodePrefix(MemoryOperand, MI, STI, OS); } -/// AVX instructions are encoded using a opcode prefix called VEX. -void X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI, - raw_ostream &OS) const { +// AVX instructions are encoded using an encoding scheme that combines +// prefix bytes, opcode extension field, operand encoding fields, and vector +// length encoding capability into a new prefix, referred to as VEX. + +// The majority of the AVX-512 family of instructions (operating on +// 512/256/128-bit vector register operands) are encoded using a new prefix +// (called EVEX). + +// XOP is a revised subset of what was originally intended as SSE5. It was +// changed to be similar but not overlapping with AVX. + +/// Emit XOP, VEX2, VEX3 or EVEX prefix. +/// \returns the used prefix.
+PrefixKind X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, + const MCInst &MI, + raw_ostream &OS) const { const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); uint64_t TSFlags = Desc.TSFlags; assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX."); - uint64_t Encoding = TSFlags & X86II::EncodingMask; + X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo()); + switch (TSFlags & X86II::EncodingMask) { + default: + break; + case X86II::XOP: + Prefix.setLowerBound(XOP); + break; + case X86II::VEX: + // VEX can be 2 byte or 3 byte, not determined yet if not explicit + Prefix.setLowerBound(MI.getFlags() & X86::IP_USE_VEX3 ? VEX3 : VEX2); + break; + case X86II::EVEX: + Prefix.setLowerBound(EVEX); + break; + } + + Prefix.setW(TSFlags & X86II::VEX_W); + bool HasEVEX_K = TSFlags & X86II::EVEX_K; bool HasVEX_4V = TSFlags & X86II::VEX_4V; bool HasEVEX_RC = TSFlags & X86II::EVEX_RC; - // VEX_R: opcode externsion equivalent to REX.R in - // 1's complement (inverted) form - // - // 1: Same as REX_R=0 (must be 1 in 32-bit mode) - // 0: Same as REX_R=1 (64 bit mode only) - // - uint8_t VEX_R = 0x1; - uint8_t EVEX_R2 = 0x1; - - // VEX_X: equivalent to REX.X, only used when a - // register is used for index in SIB Byte. - // - // 1: Same as REX.X=0 (must be 1 in 32-bit mode) - // 0: Same as REX.X=1 (64-bit mode only) - uint8_t VEX_X = 0x1; - - // VEX_B: - // - // 1: Same as REX_B=0 (ignored in 32-bit mode) - // 0: Same as REX_B=1 (64 bit mode only) - // - uint8_t VEX_B = 0x1; - - // VEX_W: opcode specific (use like REX.W, or used for - // opcode extension, or ignored, depending on the opcode byte) - uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0; - - // VEX_5M (VEX m-mmmmm field): - // - // 0b00000: Reserved for future use - // 0b00001: implied 0F leading opcode - // 0b00010: implied 0F 38 leading opcode bytes - // 0b00011: implied 0F 3A leading opcode bytes - // 0b00100: Reserved for future use - // 0b00101: VEX MAP5 - // 0b00110: VEX MAP6 - // 0b00111-0b11111: Reserved for future use - // 0b01000: XOP map select - 08h instructions with imm byte - // 0b01001: XOP map select - 09h instructions with no imm byte - // 0b01010: XOP map select - 0Ah instructions with imm dword - uint8_t VEX_5M; switch (TSFlags & X86II::OpMapMask) { default: llvm_unreachable("Invalid prefix!"); case X86II::TB: - VEX_5M = 0x1; - break; // 0F + Prefix.set5M(0x1); // 0F + break; case X86II::T8: - VEX_5M = 0x2; - break; // 0F 38 + Prefix.set5M(0x2); // 0F 38 + break; case X86II::TA: - VEX_5M = 0x3; - break; // 0F 3A + Prefix.set5M(0x3); // 0F 3A + break; case X86II::XOP8: - VEX_5M = 0x8; + Prefix.set5M(0x8); break; case X86II::XOP9: - VEX_5M = 0x9; + Prefix.set5M(0x9); break; case X86II::XOPA: - VEX_5M = 0xA; + Prefix.set5M(0xA); break; case X86II::T_MAP5: - VEX_5M = 0x5; + Prefix.set5M(0x5); break; case X86II::T_MAP6: - VEX_5M = 0x6; + Prefix.set5M(0x6); break; } - // VEX_4V (VEX vvvv field): a register specifier - // (in 1's complement form) or 1111 if unused. - uint8_t VEX_4V = 0xf; - uint8_t EVEX_V2 = 0x1; - - // EVEX_L2/VEX_L (Vector Length): - // - // L2 L - // 0 0: scalar or 128-bit vector - // 0 1: 256-bit vector - // 1 0: 512-bit vector - // - uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0; - uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 
1 : 0; - - // VEX_PP: opcode extension providing equivalent - // functionality of a SIMD prefix - // - // 0b00: None - // 0b01: 66 - // 0b10: F3 - // 0b11: F2 - // - uint8_t VEX_PP = 0; + Prefix.setL(TSFlags & X86II::VEX_L); + Prefix.setL2(TSFlags & X86II::EVEX_L2); switch (TSFlags & X86II::OpPrefixMask) { case X86II::PD: - VEX_PP = 0x1; - break; // 66 + Prefix.setPP(0x1); // 66 + break; case X86II::XS: - VEX_PP = 0x2; - break; // F3 + Prefix.setPP(0x2); // F3 + break; case X86II::XD: - VEX_PP = 0x3; - break; // F2 + Prefix.setPP(0x3); // F2 + break; } - // EVEX_U - uint8_t EVEX_U = 1; // Always '1' so far - - // EVEX_z - uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0; - - // EVEX_b - uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0; - - // EVEX_rc - uint8_t EVEX_rc = 0; - - // EVEX_aaa - uint8_t EVEX_aaa = 0; + Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z)); + Prefix.setEVEX_b(TSFlags & X86II::EVEX_B); bool EncodeRC = false; - - // Classify VEX_B, VEX_4V, VEX_R, VEX_X - unsigned NumOps = Desc.getNumOperands(); + uint8_t EVEX_rc = 0; unsigned CurOp = X86II::getOperandBias(Desc); switch (TSFlags & X86II::FormMask) { @@ -803,19 +940,11 @@ llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!"); case X86II::MRMDestMem4VOp3CC: { // MemAddr, src1(ModR/M), src2(VEX_4V) - unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); - VEX_B = ~(BaseRegEnc >> 3) & 1; - unsigned IndexRegEnc = - getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); - VEX_X = ~(IndexRegEnc >> 3) & 1; - + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); CurOp += X86::AddrNumOperands; - - unsigned RegEnc = getX86RegEncoding(MI, ++CurOp); - VEX_R = ~(RegEnc >> 3) & 1; - - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; + Prefix.setR(MI, ++CurOp); + Prefix.set4V(MI, CurOp++); break; } case X86II::MRM_C0: @@ -829,28 +958,20 @@ // MemAddr, src1(VEX_4V), src2(ModR/M) // MemAddr, src1(ModR/M), imm8 // - unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); - VEX_B = ~(BaseRegEnc >> 3) & 1; - unsigned IndexRegEnc = - getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); - VEX_X = ~(IndexRegEnc >> 3) & 1; + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV. 
- EVEX_V2 = ~(IndexRegEnc >> 4) & 1; + Prefix.setV2(MI, MemOperand + X86::AddrIndexReg); CurOp += X86::AddrNumOperands; if (HasEVEX_K) - EVEX_aaa = getX86RegEncoding(MI, CurOp++); + Prefix.setAAA(MI, CurOp++); - if (HasVEX_4V) { - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - EVEX_V2 = ~(VRegEnc >> 4) & 1; - } + if (HasVEX_4V) + Prefix.set4VV2(MI, CurOp++); - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - EVEX_R2 = ~(RegEnc >> 4) & 1; + Prefix.setRR2(MI, CurOp++); break; } case X86II::MRMSrcMemFSIB: @@ -863,57 +984,36 @@ // // FMA4: // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4]) - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - EVEX_R2 = ~(RegEnc >> 4) & 1; + Prefix.setRR2(MI, CurOp++); if (HasEVEX_K) - EVEX_aaa = getX86RegEncoding(MI, CurOp++); + Prefix.setAAA(MI, CurOp++); - if (HasVEX_4V) { - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - EVEX_V2 = ~(VRegEnc >> 4) & 1; - } + if (HasVEX_4V) + Prefix.set4VV2(MI, CurOp++); - unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); - VEX_B = ~(BaseRegEnc >> 3) & 1; - unsigned IndexRegEnc = - getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); - VEX_X = ~(IndexRegEnc >> 3) & 1; + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV. - EVEX_V2 = ~(IndexRegEnc >> 4) & 1; + Prefix.setV2(MI, MemOperand + X86::AddrIndexReg); break; } case X86II::MRMSrcMem4VOp3: { // Instruction format for 4VOp3: // src1(ModR/M), MemAddr, src3(VEX_4V) - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - - unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); - VEX_B = ~(BaseRegEnc >> 3) & 1; - unsigned IndexRegEnc = - getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); - VEX_X = ~(IndexRegEnc >> 3) & 1; - - VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf; + Prefix.setR(MI, CurOp++); + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); + Prefix.set4V(MI, CurOp + X86::AddrNumOperands); break; } case X86II::MRMSrcMemOp4: { // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - - unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); - VEX_B = ~(BaseRegEnc >> 3) & 1; - unsigned IndexRegEnc = - getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); - VEX_X = ~(IndexRegEnc >> 3) & 1; + Prefix.setR(MI, CurOp++); + Prefix.set4V(MI, CurOp++); + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); break; } case X86II::MRM0m: @@ -927,22 +1027,16 @@ // MRM[0-9]m instructions forms: // MemAddr // src1(VEX_4V), MemAddr - if (HasVEX_4V) { - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - EVEX_V2 = ~(VRegEnc >> 4) & 1; - } + if (HasVEX_4V) + Prefix.set4VV2(MI, CurOp++); if (HasEVEX_K) - EVEX_aaa = getX86RegEncoding(MI, CurOp++); + Prefix.setAAA(MI, CurOp++); - unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); - VEX_B = ~(BaseRegEnc >> 3) & 1; - unsigned IndexRegEnc = - getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); - VEX_X = ~(IndexRegEnc >> 3) & 1; + Prefix.setB(MI, MemOperand + 
X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV. - EVEX_V2 = ~(IndexRegEnc >> 4) & 1; + Prefix.setV2(MI, MemOperand + X86::AddrIndexReg); break; } @@ -954,25 +1048,21 @@ // // FMA4: // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - EVEX_R2 = ~(RegEnc >> 4) & 1; + Prefix.setRR2(MI, CurOp++); if (HasEVEX_K) - EVEX_aaa = getX86RegEncoding(MI, CurOp++); + Prefix.setAAA(MI, CurOp++); - if (HasVEX_4V) { - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - EVEX_V2 = ~(VRegEnc >> 4) & 1; - } + if (HasVEX_4V) + Prefix.set4VV2(MI, CurOp++); - RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_B = ~(RegEnc >> 3) & 1; - VEX_X = ~(RegEnc >> 4) & 1; + Prefix.setB(MI, CurOp); + Prefix.setX(MI, CurOp, 4); + ++CurOp; - if (EVEX_b) { + if (TSFlags & X86II::EVEX_B) { if (HasEVEX_RC) { + unsigned NumOps = Desc.getNumOperands(); unsigned RcOperand = NumOps - 1; assert(RcOperand >= CurOp); EVEX_rc = MI.getOperand(RcOperand).getImm(); @@ -985,29 +1075,21 @@ case X86II::MRMSrcReg4VOp3: { // Instruction format for 4VOp3: // src1(ModR/M), src2(ModR/M), src3(VEX_4V) - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - - RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_B = ~(RegEnc >> 3) & 1; - - VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf; + Prefix.setR(MI, CurOp++); + Prefix.setB(MI, CurOp++); + Prefix.set4V(MI, CurOp++); break; } case X86II::MRMSrcRegOp4: { // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - + Prefix.setR(MI, CurOp++); + Prefix.set4V(MI, CurOp++); // Skip second register source (encoded in Imm[7:4]) ++CurOp; - RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_B = ~(RegEnc >> 3) & 1; - VEX_X = ~(RegEnc >> 4) & 1; + Prefix.setB(MI, CurOp); + Prefix.setX(MI, CurOp, 4); + ++CurOp; break; } case X86II::MRMDestReg: { @@ -1015,23 +1097,18 @@ // dst(ModR/M), src(ModR/M) // dst(ModR/M), src(ModR/M), imm8 // dst(ModR/M), src1(VEX_4V), src2(ModR/M) - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_B = ~(RegEnc >> 3) & 1; - VEX_X = ~(RegEnc >> 4) & 1; + Prefix.setB(MI, CurOp); + Prefix.setX(MI, CurOp, 4); + ++CurOp; if (HasEVEX_K) - EVEX_aaa = getX86RegEncoding(MI, CurOp++); + Prefix.setAAA(MI, CurOp++); - if (HasVEX_4V) { - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - EVEX_V2 = ~(VRegEnc >> 4) & 1; - } + if (HasVEX_4V) + Prefix.set4VV2(MI, CurOp++); - RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - EVEX_R2 = ~(RegEnc >> 4) & 1; - if (EVEX_b) + Prefix.setRR2(MI, CurOp++); + if (TSFlags & X86II::EVEX_B) EncodeRC = true; break; } @@ -1039,9 +1116,7 @@ // MRMr0 instructions forms: // 11:rrr:000 // dst(ModR/M) - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_R = ~(RegEnc >> 3) & 1; - EVEX_R2 = ~(RegEnc >> 4) & 1; + Prefix.setRR2(MI, CurOp++); break; } case X86II::MRM0r: @@ -1054,75 +1129,25 @@ case X86II::MRM7r: { // MRM0r-MRM7r instructions forms: // dst(VEX_4V), src(ModR/M), imm8 - if (HasVEX_4V) { - unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); - VEX_4V = ~VRegEnc & 0xf; - EVEX_V2 = ~(VRegEnc >> 4) & 1; - } + if (HasVEX_4V) + Prefix.set4VV2(MI, CurOp++); + if (HasEVEX_K) - EVEX_aaa = getX86RegEncoding(MI, CurOp++); + 
Prefix.setAAA(MI, CurOp++); - unsigned RegEnc = getX86RegEncoding(MI, CurOp++); - VEX_B = ~(RegEnc >> 3) & 1; - VEX_X = ~(RegEnc >> 4) & 1; + Prefix.setB(MI, CurOp); + Prefix.setX(MI, CurOp, 4); + ++CurOp; break; } } - - if (Encoding == X86II::VEX || Encoding == X86II::XOP) { - // VEX opcode prefix can have 2 or 3 bytes - // - // 3 bytes: - // +-----+ +--------------+ +-------------------+ - // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp | - // +-----+ +--------------+ +-------------------+ - // 2 bytes: - // +-----+ +-------------------+ - // | C5h | | R | vvvv | L | pp | - // +-----+ +-------------------+ - // - // XOP uses a similar prefix: - // +-----+ +--------------+ +-------------------+ - // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp | - // +-----+ +--------------+ +-------------------+ - uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3); - - // Can we use the 2 byte VEX prefix? - if (!(MI.getFlags() & X86::IP_USE_VEX3) && Encoding == X86II::VEX && - VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { - emitByte(0xC5, OS); - emitByte(LastByte | (VEX_R << 7), OS); - return; - } - - // 3 byte VEX prefix - emitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, OS); - emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, OS); - emitByte(LastByte | (VEX_W << 7), OS); - } else { - assert(Encoding == X86II::EVEX && "unknown encoding!"); - // EVEX opcode prefix can have 4 bytes - // - // +-----+ +--------------+ +-------------------+ +------------------------+ - // | 62h | | RXBR' | 0mmm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa | - // +-----+ +--------------+ +-------------------+ +------------------------+ - assert((VEX_5M & 0x7) == VEX_5M && - "More than 3 significant bits in VEX.m-mmmm fields for EVEX!"); - - emitByte(0x62, OS); - emitByte((VEX_R << 7) | (VEX_X << 6) | (VEX_B << 5) | (EVEX_R2 << 4) | - VEX_5M, - OS); - emitByte((VEX_W << 7) | (VEX_4V << 3) | (EVEX_U << 2) | VEX_PP, OS); - if (EncodeRC) - emitByte((EVEX_z << 7) | (EVEX_rc << 5) | (EVEX_b << 4) | (EVEX_V2 << 3) | - EVEX_aaa, - OS); - else - emitByte((EVEX_z << 7) | (EVEX_L2 << 6) | (VEX_L << 5) | (EVEX_b << 4) | - (EVEX_V2 << 3) | EVEX_aaa, - OS); + if (EncodeRC) { + Prefix.setL(EVEX_rc & 0x1); + Prefix.setL2(EVEX_rc & 0x2); } + PrefixKind Kind = Prefix.determineOptimalKind(); + Prefix.emit(OS); + return Kind; } /// Emit REX prefix which specifies @@ -1130,120 +1155,109 @@ /// 2) non-default operand size, and /// 3) use of X86-64 extended registers. /// -/// \returns true if REX prefix is used, otherwise returns false. -bool X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI, - const MCSubtargetInfo &STI, - raw_ostream &OS) const { - uint8_t REX = [&, MemOperand]() { - uint8_t REX = 0; - bool UsesHighByteReg = false; - - const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); - uint64_t TSFlags = Desc.TSFlags; - - if (TSFlags & X86II::REX_W) - REX |= 1 << 3; // set REX.W - - if (MI.getNumOperands() == 0) - return REX; - - unsigned NumOps = MI.getNumOperands(); - unsigned CurOp = X86II::getOperandBias(Desc); - - // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix. - for (unsigned i = CurOp; i != NumOps; ++i) { - const MCOperand &MO = MI.getOperand(i); - if (MO.isReg()) { - unsigned Reg = MO.getReg(); - if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || - Reg == X86::DH) - UsesHighByteReg = true; - if (X86II::isX86_64NonExtLowByteReg(Reg)) - // FIXME: The caller of determineREXPrefix slaps this prefix onto - // anything that returns non-zero. 
- REX |= 0x40; // REX fixed encoding prefix - } else if (MO.isExpr() && STI.getTargetTriple().isX32()) { - // GOTTPOFF and TLSDESC relocations require a REX prefix to allow - // linker optimizations: even if the instructions we see may not require - // any prefix, they may be replaced by instructions that do. This is - // handled as a special case here so that it also works for hand-written - // assembly without the user needing to write REX, as with GNU as. - const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr()); - if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF || - Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) { - REX |= 0x40; // REX fixed encoding prefix - } +/// \returns the used prefix (REX or None). +PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI, + const MCSubtargetInfo &STI, + raw_ostream &OS) const { + if (!STI.hasFeature(X86::Is64Bit)) + return None; + X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo()); + bool UsesHighByteReg = false; + const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); + uint64_t TSFlags = Desc.TSFlags; + Prefix.setW(TSFlags & X86II::REX_W); + unsigned NumOps = MI.getNumOperands(); + if (!NumOps) { + PrefixKind Kind = Prefix.determineOptimalKind(); + Prefix.emit(OS); + return Kind; + } + unsigned CurOp = X86II::getOperandBias(Desc); + for (unsigned i = CurOp; i != NumOps; ++i) { + const MCOperand &MO = MI.getOperand(i); + if (MO.isReg()) { + unsigned Reg = MO.getReg(); + if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH) + UsesHighByteReg = true; + // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix. + if (X86II::isX86_64NonExtLowByteReg(Reg)) + Prefix.setLowerBound(REX); + } else if (MO.isExpr() && STI.getTargetTriple().isX32()) { + // GOTTPOFF and TLSDESC relocations require a REX prefix to allow + // linker optimizations: even if the instructions we see may not require + // any prefix, they may be replaced by instructions that do. This is + // handled as a special case here so that it also works for hand-written + // assembly without the user needing to write REX, as with GNU as.
+ const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr()); + if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF || + Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) { + Prefix.setLowerBound(REX); + } } } - - switch (TSFlags & X86II::FormMask) { - case X86II::AddRegFrm: - REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B - break; - case X86II::MRMSrcReg: - case X86II::MRMSrcRegCC: - REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R - REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B - break; - case X86II::MRMSrcMem: - case X86II::MRMSrcMemCC: - REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R - REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B - REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X - CurOp += X86::AddrNumOperands; - break; - case X86II::MRMDestReg: - REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B - REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R - break; - case X86II::MRMDestMem: - REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B - REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X - CurOp += X86::AddrNumOperands; - REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R - break; - case X86II::MRMXmCC: - case X86II::MRMXm: - case X86II::MRM0m: - case X86II::MRM1m: - case X86II::MRM2m: - case X86II::MRM3m: - case X86II::MRM4m: - case X86II::MRM5m: - case X86II::MRM6m: - case X86II::MRM7m: - REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B - REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X - break; - case X86II::MRMXrCC: - case X86II::MRMXr: - case X86II::MRM0r: - case X86II::MRM1r: - case X86II::MRM2r: - case X86II::MRM3r: - case X86II::MRM4r: - case X86II::MRM5r: - case X86II::MRM6r: - case X86II::MRM7r: - REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B - break; - case X86II::MRMr0: - REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R - break; - case X86II::MRMDestMemFSIB: - llvm_unreachable("FSIB format never need REX prefix!"); - } - if (REX && UsesHighByteReg) - report_fatal_error( - "Cannot encode high byte register in REX-prefixed instruction"); - return REX; - }(); - - if (!REX) - return false; - - emitByte(0x40 | REX, OS); - return true; + } + switch (TSFlags & X86II::FormMask) { + case X86II::AddRegFrm: + Prefix.setB(MI, CurOp++); + break; + case X86II::MRMSrcReg: + case X86II::MRMSrcRegCC: + Prefix.setR(MI, CurOp++); + Prefix.setB(MI, CurOp++); + break; + case X86II::MRMSrcMem: + case X86II::MRMSrcMemCC: + Prefix.setR(MI, CurOp++); + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); + CurOp += X86::AddrNumOperands; + break; + case X86II::MRMDestReg: + Prefix.setB(MI, CurOp++); + Prefix.setR(MI, CurOp++); + break; + case X86II::MRMDestMem: + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); + CurOp += X86::AddrNumOperands; + Prefix.setR(MI, CurOp++); + break; + case X86II::MRMXmCC: + case X86II::MRMXm: + case X86II::MRM0m: + case X86II::MRM1m: + case X86II::MRM2m: + case X86II::MRM3m: + case X86II::MRM4m: + case X86II::MRM5m: + case X86II::MRM6m: + case X86II::MRM7m: + Prefix.setB(MI, MemOperand + X86::AddrBaseReg); + Prefix.setX(MI, MemOperand + X86::AddrIndexReg); + break; + case X86II::MRMXrCC: + case X86II::MRMXr: + case X86II::MRM0r: + case X86II::MRM1r: + case X86II::MRM2r: + case X86II::MRM3r: + case X86II::MRM4r: + case X86II::MRM5r: + case X86II::MRM6r: + case X86II::MRM7r: + Prefix.setB(MI, CurOp++); + break;
+ case X86II::MRMr0: + Prefix.setR(MI, CurOp++); + break; + case X86II::MRMDestMemFSIB: + llvm_unreachable("FSIB format never need REX prefix!"); + } + PrefixKind Kind = Prefix.determineOptimalKind(); + if (Kind && UsesHighByteReg) + report_fatal_error( + "Cannot encode high byte register in REX-prefixed instruction"); + Prefix.emit(OS); + return Kind; } /// Emit segment override opcode prefix as needed. @@ -1260,8 +1274,8 @@ /// \param MemOperand the operand # of the start of a memory operand if present. /// If not present, it is -1. /// -/// \returns true if REX prefix is used, otherwise returns false. -bool X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI, +/// \returns the used prefix (REX or None). +PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI, const MCSubtargetInfo &STI, raw_ostream &OS) const { const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); @@ -1295,9 +1309,7 @@ // Handle REX prefix. assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) && "REX.W requires 64bit mode."); - bool HasREX = STI.hasFeature(X86::Is64Bit) - ? emitREXPrefix(MemOperand, MI, STI, OS) - : false; + PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, OS); // 0x0F escape code must be emitted just before the opcode. switch (TSFlags & X86II::OpMapMask) { @@ -1318,7 +1330,7 @@ break; } - return HasREX; + return Kind; } void X86MCCodeEmitter::emitPrefix(const MCInst &MI, raw_ostream &OS, @@ -1352,7 +1364,7 @@ uint64_t StartByte = OS.tell(); - bool HasREX = emitPrefixImpl(CurOp, MI, STI, OS); + PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, OS); // It uses the VEX.VVVV field? bool HasVEX_4V = TSFlags & X86II::VEX_4V; @@ -1451,7 +1463,7 @@ emitByte(BaseOpcode + CC, OS); unsigned SrcRegNum = CurOp + X86::AddrNumOperands; emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags, - HasREX, StartByte, OS, Fixups, STI, false); + Kind, StartByte, OS, Fixups, STI, false); CurOp = SrcRegNum + 3; // skip reg, VEX_V4 and CC break; } @@ -1468,7 +1480,7 @@ bool ForceSIB = (Form == X86II::MRMDestMemFSIB); emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags, - HasREX, StartByte, OS, Fixups, STI, ForceSIB); + Kind, StartByte, OS, Fixups, STI, ForceSIB); CurOp = SrcRegNum + 1; break; } @@ -1543,7 +1555,7 @@ bool ForceSIB = (Form == X86II::MRMSrcMemFSIB); emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)), - TSFlags, HasREX, StartByte, OS, Fixups, STI, ForceSIB); + TSFlags, Kind, StartByte, OS, Fixups, STI, ForceSIB); CurOp = FirstMemOp + X86::AddrNumOperands; if (HasVEX_I8Reg) I8RegNum = getX86RegEncoding(MI, CurOp++); @@ -1555,7 +1567,7 @@ emitByte(BaseOpcode, OS); emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)), - TSFlags, HasREX, StartByte, OS, Fixups, STI); + TSFlags, Kind, StartByte, OS, Fixups, STI); CurOp = FirstMemOp + X86::AddrNumOperands; ++CurOp; // Encoded in VEX.VVVV. 
break; @@ -1572,7 +1584,7 @@ emitByte(BaseOpcode, OS); emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)), - TSFlags, HasREX, StartByte, OS, Fixups, STI); + TSFlags, Kind, StartByte, OS, Fixups, STI); CurOp = FirstMemOp + X86::AddrNumOperands; break; } @@ -1585,7 +1597,7 @@ emitByte(BaseOpcode + CC, OS); emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)), - TSFlags, HasREX, StartByte, OS, Fixups, STI); + TSFlags, Kind, StartByte, OS, Fixups, STI); break; } @@ -1627,7 +1639,7 @@ unsigned CC = MI.getOperand(CurOp++).getImm(); emitByte(BaseOpcode + CC, OS); - emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, HasREX, StartByte, OS, Fixups, + emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, OS, Fixups, STI); break; } @@ -1648,7 +1660,7 @@ emitByte(BaseOpcode, OS); emitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags, - HasREX, StartByte, OS, Fixups, STI); + Kind, StartByte, OS, Fixups, STI); CurOp += X86::AddrNumOperands; break;
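Taken together, the new flow is: seed the helper with a lower bound (e.g. setLowerBound(REX) when SPL/BPL/SIL/DIL or a GOTTPOFF/TLSDESC relocation forces a prefix), fill in the W/R/X/B and VEX/EVEX fields from the operands, then let determineOptimalKind() pick the shortest legal encoding before emit(). A minimal standalone sketch of the REX path follows; RexSketch is a hypothetical stand-in, not the patch's API:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for the REX portion of X86OpcodePrefixHelper.
struct RexSketch {
  unsigned W = 0, R = 0, X = 0, B = 0;
  bool Forced = false; // corresponds to setLowerBound(REX)
  // determineOptimalKind(): a REX byte is needed only if it is forced
  // or any of W/R/X/B is set.
  bool needsREX() const { return Forced || W || R || X || B; }
  void emit() const {
    if (needsREX())
      std::printf("%02X\n", 0x40 | W << 3 | R << 2 | X << 1 | B);
  }
};

int main() {
  // `addq %r8, %rax` (opcode 01 /r): REX.W for the 64-bit operand size,
  // REX.R because ModRM.reg is %r8 (an extended register) -> prefix 0x4C.
  RexSketch P;
  P.W = 1;
  P.R = 1;
  P.emit(); // prints "4C"
  return 0;
}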