Index: llvm/trunk/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ llvm/trunk/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -590,6 +590,8 @@
                                            int MemOperand, const MCInst &MI,
                                            const MCInstrDesc &Desc,
                                            raw_ostream &OS) const {
+  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");
+
   uint64_t Encoding = TSFlags & X86II::EncodingMask;
   bool HasEVEX_K = TSFlags & X86II::EVEX_K;
   bool HasVEX_4V = TSFlags & X86II::VEX_4V;
@@ -1109,6 +1111,10 @@
                                              : X86II::OpSize16))
     EmitByte(0x66, CurByte, OS);
 
+  // Emit the LOCK opcode prefix.
+  if (TSFlags & X86II::LOCK)
+    EmitByte(0xF0, CurByte, OS);
+
   switch (TSFlags & X86II::OpPrefixMask) {
   case X86II::PD:   // 66
     EmitByte(0x66, CurByte, OS);
@@ -1182,10 +1188,6 @@
   int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
   if (MemoryOperand != -1) MemoryOperand += CurOp;
 
-  // Emit the lock opcode prefix as needed.
-  if (TSFlags & X86II::LOCK)
-    EmitByte(0xF0, CurByte, OS);
-
   // Emit segment override opcode prefix as needed.
   if (MemoryOperand >= 0)
     EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
Index: llvm/trunk/test/CodeGen/X86/atomic16.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/atomic16.ll
+++ llvm/trunk/test/CodeGen/X86/atomic16.ll
@@ -15,17 +15,17 @@
 ; X32: incw
   %t2 = atomicrmw add i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: addw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: addw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: addw $3
   %t3 = atomicrmw add i16* @sc16, i16 5 acquire
 ; X64: lock
-; X64: xaddw {{.*}} # encoding: [0xf0,0x66
+; X64: xaddw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xaddw
   %t4 = atomicrmw add i16* @sc16, i16 %t3 acquire
 ; X64: lock
-; X64: addw {{.*}} # encoding: [0xf0,0x66
+; X64: addw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: addw
   ret void
@@ -43,17 +43,17 @@
 ; X32: decw
   %t2 = atomicrmw sub i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: subw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: subw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: subw $3
   %t3 = atomicrmw sub i16* @sc16, i16 5 acquire
 ; X64: lock
-; X64: xaddw {{.*}} # encoding: [0xf0,0x66
+; X64: xaddw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xaddw
   %t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire
 ; X64: lock
-; X64: subw {{.*}} # encoding: [0xf0,0x66
+; X64: subw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: subw
   ret void
@@ -66,7 +66,7 @@
 ; X32-LABEL: atomic_fetch_and16
   %t1 = atomicrmw and i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: andw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: andw $3
   %t2 = atomicrmw and i16* @sc16, i16 5 acquire
@@ -78,7 +78,7 @@
 ; X32: cmpxchgw
   %t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
 ; X64: lock
-; X64: andw {{.*}} # encoding: [0xf0,0x66
+; X64: andw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: andw
   ret void
@@ -91,7 +91,7 @@
 ; X32-LABEL: atomic_fetch_or16
   %t1 = atomicrmw or i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: orw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: orw $3
   %t2 = atomicrmw or i16* @sc16, i16 5 acquire
@@ -103,7 +103,7 @@
 ; X32: cmpxchgw
   %t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
 ; X64: lock
-; X64: orw {{.*}} # encoding: [0xf0,0x66
+; X64: orw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: orw
   ret void
@@ -116,7 +116,7 @@
 ; X32-LABEL: atomic_fetch_xor16
   %t1 = atomicrmw xor i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: xorw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xorw $3
   %t2 = atomicrmw xor i16* @sc16, i16 5 acquire
@@ -128,7 +128,7 @@
 ; X32: cmpxchgw
   %t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
 ; X64: lock
-; X64: xorw {{.*}} # encoding: [0xf0,0x66
+; X64: xorw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xorw
   ret void