diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -292,7 +292,7 @@
   { X86::JMP32r_NT, X86::JMP32m_NT, TB_FOLDED_LOAD },
   { X86::JMP64r, X86::JMP64m, TB_FOLDED_LOAD },
   { X86::JMP64r_NT, X86::JMP64m_NT, TB_FOLDED_LOAD },
-  { X86::MMX_MOVD64from64rr, X86::MMX_MOVD64from64rm, TB_FOLDED_STORE | TB_NO_REVERSE },
+  { X86::MMX_MOVD64from64rr, X86::MMX_MOVD64from64mr, TB_FOLDED_STORE | TB_NO_REVERSE },
   { X86::MMX_MOVD64grr, X86::MMX_MOVD64mr, TB_FOLDED_STORE | TB_NO_REVERSE },
   { X86::MOV16ri, X86::MOV16mi, TB_FOLDED_STORE },
   { X86::MOV16rr, X86::MOV16mr, TB_FOLDED_STORE },
diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -211,7 +211,7 @@
                 (MMX_MOVQ64rr_REV VR64:$dst, VR64:$src), 0>;

 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
-def MMX_MOVD64from64rm : MMXRI<0x7E, MRMDestMem,
+def MMX_MOVD64from64mr : MMXRI<0x7E, MRMDestMem,
                                (outs), (ins i64mem:$dst, VR64:$src),
                                "movq\t{$src, $dst|$dst, $src}", []>,
                                Sched<[SchedWriteVecMoveLS.MMX.MR]>;
diff --git a/llvm/lib/Target/X86/X86SchedSandyBridge.td b/llvm/lib/Target/X86/X86SchedSandyBridge.td
--- a/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -816,7 +816,7 @@
   let NumMicroOps = 1;
   let ResourceCycles = [1];
 }
-def: InstRW<[SBWriteResGroup48], (instrs MMX_MOVD64from64rm,
+def: InstRW<[SBWriteResGroup48], (instrs MMX_MOVD64from64mr,
                                          VBROADCASTSSrm)>;
 def: InstRW<[SBWriteResGroup48], (instregex "POP(16|32|64)r",
                                             "(V?)MOV64toPQIrm",