Index: llvm/lib/CodeGen/MachineInstr.cpp
===================================================================
--- llvm/lib/CodeGen/MachineInstr.cpp
+++ llvm/lib/CodeGen/MachineInstr.cpp
@@ -1228,81 +1228,90 @@
   if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
     return false;
 
-  // FIXME: Need to handle multiple memory operands to support all targets.
-  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
+  if (memoperands_empty() || Other.memoperands_empty())
     return true;
 
-  MachineMemOperand *MMOa = *memoperands_begin();
-  MachineMemOperand *MMOb = *Other.memoperands_begin();
-
-  // The following interface to AA is fashioned after DAGCombiner::isAlias
-  // and operates with MachineMemOperand offset with some important
-  // assumptions:
-  //   - LLVM fundamentally assumes flat address spaces.
-  //   - MachineOperand offset can *only* result from legalization and
-  //     cannot affect queries other than the trivial case of overlap
-  //     checking.
-  //   - These offsets never wrap and never step outside
-  //     of allocated objects.
-  //   - There should never be any negative offsets here.
-  //
-  // FIXME: Modify API to hide this math from "user"
-  // Even before we go to AA we can reason locally about some
-  // memory objects. It can save compile time, and possibly catch some
-  // corner cases not currently covered.
-
-  int64_t OffsetA = MMOa->getOffset();
-  int64_t OffsetB = MMOb->getOffset();
-  int64_t MinOffset = std::min(OffsetA, OffsetB);
-
-  uint64_t WidthA = MMOa->getSize();
-  uint64_t WidthB = MMOb->getSize();
-  bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
-  bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
-
-  const Value *ValA = MMOa->getValue();
-  const Value *ValB = MMOb->getValue();
-  bool SameVal = (ValA && ValB && (ValA == ValB));
-  if (!SameVal) {
-    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
-    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
-    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
-      return false;
-    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
-      return false;
-    if (PSVa && PSVb && (PSVa == PSVb))
-      SameVal = true;
-  }
+  // Check every pair of memory operands; the instructions may alias if *any*
+  // pair does.  A pair that provably cannot alias only lets us skip to the
+  // next pair -- concluding "no alias" for the whole instruction is valid
+  // only once every pair has been shown to be disjoint.
+  for (auto &&MMOa : memoperands()) {
+    for (auto &&MMOb : Other.memoperands()) {
+      // The following interface to AA is fashioned after DAGCombiner::isAlias
+      // and operates with MachineMemOperand offset with some important
+      // assumptions:
+      //   - LLVM fundamentally assumes flat address spaces.
+      //   - MachineOperand offset can *only* result from legalization and
+      //     cannot affect queries other than the trivial case of overlap
+      //     checking.
+      //   - These offsets never wrap and never step outside
+      //     of allocated objects.
+      //   - There should never be any negative offsets here.
+      //
+      // FIXME: Modify API to hide this math from "user"
+      // Even before we go to AA we can reason locally about some
+      // memory objects. It can save compile time, and possibly catch some
+      // corner cases not currently covered.
+
+      int64_t OffsetA = MMOa->getOffset();
+      int64_t OffsetB = MMOb->getOffset();
+      int64_t MinOffset = std::min(OffsetA, OffsetB);
+
+      uint64_t WidthA = MMOa->getSize();
+      uint64_t WidthB = MMOb->getSize();
+      bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
+      bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
+
+      const Value *ValA = MMOa->getValue();
+      const Value *ValB = MMOb->getValue();
+      bool SameVal = (ValA && ValB && (ValA == ValB));
+      if (!SameVal) {
+        const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
+        const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
+        if (PSVa && ValB && !PSVa->mayAlias(&MFI))
+          continue;
+        if (PSVb && ValA && !PSVb->mayAlias(&MFI))
+          continue;
+        if (PSVa && PSVb && (PSVa == PSVb))
+          SameVal = true;
+      }
 
-  if (SameVal) {
-    if (!KnownWidthA || !KnownWidthB)
-      return true;
-    int64_t MaxOffset = std::max(OffsetA, OffsetB);
-    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
-    return (MinOffset + LowWidth > MaxOffset);
-  }
+      if (SameVal) {
+        if (!KnownWidthA || !KnownWidthB)
+          return true;
+        int64_t MaxOffset = std::max(OffsetA, OffsetB);
+        int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
+        if (MinOffset + LowWidth > MaxOffset)
+          return true;
+        continue;
+      }
 
-  if (!AA)
-    return true;
+      if (!AA)
+        return true;
 
-  if (!ValA || !ValB)
-    return true;
+      if (!ValA || !ValB)
+        return true;
 
-  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
-  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
+      assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
+      assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
 
-  int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
-                                 : MemoryLocation::UnknownSize;
-  int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
-                                 : MemoryLocation::UnknownSize;
+      int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
+                                     : MemoryLocation::UnknownSize;
+      int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
+                                     : MemoryLocation::UnknownSize;
 
-  AliasResult AAResult = AA->alias(
-      MemoryLocation(ValA, OverlapA,
-                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
-      MemoryLocation(ValB, OverlapB,
-                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
+      AliasResult AAResult =
+          AA->alias(MemoryLocation(ValA, OverlapA,
+                                   UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+                    MemoryLocation(ValB, OverlapB,
+                                   UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
 
-  return (AAResult != NoAlias);
+      if (AAResult != NoAlias)
+        return true;
+    }
+  }
+
+  return false;
 }
 
 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
Index: llvm/test/CodeGen/AArch64/merge-store-dependency.ll
===================================================================
--- llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -19,11 +19,11 @@
 ; A53-NEXT:    mov x19, x8
 ; A53-NEXT:    mov w0, w1
 ; A53-NEXT:    mov w9, #256
+; A53-NEXT:    stp x2, x3, [x8, #32]
+; A53-NEXT:    mov x2, x8
 ; A53-NEXT:    str q0, [x19, #16]!
 ; A53-NEXT:    str w1, [x19]
 ; A53-NEXT:    mov w1, #4
-; A53-NEXT:    stp x2, x3, [x8, #32]
-; A53-NEXT:    mov x2, x8
 ; A53-NEXT:    str q0, [x8]
 ; A53-NEXT:    strh w9, [x8, #24]
 ; A53-NEXT:    str wzr, [x8, #20]
Index: llvm/test/CodeGen/ARM/big-endian-neon-fp16-bitconv.ll
===================================================================
--- llvm/test/CodeGen/ARM/big-endian-neon-fp16-bitconv.ll
+++ llvm/test/CodeGen/ARM/big-endian-neon-fp16-bitconv.ll
@@ -503,12 +503,12 @@
 ; CHECK-NEXT:    vmov.32 r3, d16[1]
 ; CHECK-NEXT:    vmov.32 r1, d16[0]
 ; CHECK-NEXT:    subs r12, r12, #1
+; CHECK-NEXT:    str r12, [r0, #12]
 ; CHECK-NEXT:    sbcs r2, r2, #0
+; CHECK-NEXT:    str r2, [r0, #8]
 ; CHECK-NEXT:    sbcs r3, r3, #0
 ; CHECK-NEXT:    sbc r1, r1, #0
 ; CHECK-NEXT:    stm r0, {r1, r3}
-; CHECK-NEXT:    str r2, [r0, #8]
-; CHECK-NEXT:    str r12, [r0, #12]
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.1:
Index: llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -1092,6 +1092,7 @@
 ; CHECK-NEXT:    ldrd lr, r10, [r12, #24]
 ; CHECK-NEXT:    vstrb.8 q0, [r11], #16
 ; CHECK-NEXT:    vldrw.u32 q0, [r8], #32
+; CHECK-NEXT:    strd r11, r1, [sp, #24] @ 8-byte Folded Spill
 ; CHECK-NEXT:    vldrw.u32 q1, [r8, #-28]
 ; CHECK-NEXT:    vmul.f32 q0, q0, r0
 ; CHECK-NEXT:    vldrw.u32 q6, [r8, #-24]
@@ -1103,13 +1104,12 @@
 ; CHECK-NEXT:    vfma.f32 q0, q4, r6
 ; CHECK-NEXT:    vldrw.u32 q3, [r8, #-8]
 ; CHECK-NEXT:    vfma.f32 q0, q5, r5
-; CHECK-NEXT:    vldrw.u32 q1, [r8, #-4]
-; CHECK-NEXT:    vfma.f32 q0, q2, r3
 ; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    vfma.f32 q0, q2, r3
+; CHECK-NEXT:    vldrw.u32 q1, [r8, #-4]
 ; CHECK-NEXT:    vfma.f32 q0, q3, lr
-; CHECK-NEXT:    strd r11, r1, [sp, #24] @ 8-byte Folded Spill
-; CHECK-NEXT:    vfma.f32 q0, q1, r10
 ; CHECK-NEXT:    cmp r0, #16
+; CHECK-NEXT:    vfma.f32 q0, q1, r10
 ; CHECK-NEXT:    blo .LBB16_7
 ; CHECK-NEXT:  @ %bb.5: @ %for.body.preheader
 ; CHECK-NEXT:    @ in Loop: Header=BB16_4 Depth=1
Index: llvm/test/CodeGen/Thumb2/mve-phireg.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-phireg.ll
+++ llvm/test/CodeGen/Thumb2/mve-phireg.ll
@@ -168,16 +168,14 @@
 ; CHECK-NEXT:    vmov q1, q4
 ; CHECK-NEXT:    vmov s1, r7
 ; CHECK-NEXT:    vmov.32 q1[1], r6
-; CHECK-NEXT:    mov.w r10, #0
-; CHECK-NEXT:    vmov.32 q1[2], r5
 ; CHECK-NEXT:    vmov.32 q5[0], r7
+; CHECK-NEXT:    vmov.32 q1[2], r5
+; CHECK-NEXT:    vmov s9, r4
 ; CHECK-NEXT:    vmov.32 q1[3], r4
-; CHECK-NEXT:    strd r0, r10, [sp, #24]
+; CHECK-NEXT:    vdup.32 q6, r7
 ; CHECK-NEXT:    vstrw.32 q1, [sp, #76]
 ; CHECK-NEXT:    vmov q1, q5
-; CHECK-NEXT:    vmov s9, r4
 ; CHECK-NEXT:    vmov.32 q1[1], r7
-; CHECK-NEXT:    vdup.32 q6, r7
 ; CHECK-NEXT:    vmov.f32 s2, s1
 ; CHECK-NEXT:    vmov.f32 s8, s0
 ; CHECK-NEXT:    vmov.32 q1[2], r6
@@ -185,6 +183,7 @@
 ; CHECK-NEXT:    vmov q7, q6
 ; CHECK-NEXT:    vmov.f32 s10, s1
 ; CHECK-NEXT:    mov.w r8, #4
+; CHECK-NEXT:    mov.w r10, #0
 ; CHECK-NEXT:    vmov.32 q1[3], r4
 ; CHECK-NEXT:    vmov.32 q3[0], r4
 ; CHECK-NEXT:    vmov.32 q7[1], r4
@@ -192,6 +191,7 @@
 ; CHECK-NEXT:    vmov.f32 s11, s3
 ; CHECK-NEXT:    movs r1, #64
 ; CHECK-NEXT:    strh.w r8, [sp, #390]
+; CHECK-NEXT:    strd r0, r10, [sp, #24]
 ; CHECK-NEXT:    vstrw.32 q0, [sp, #44]
 ; CHECK-NEXT:    str r0, [r0]
 ; CHECK-NEXT:    vstrw.32 q2, [r0]
Index: llvm/test/CodeGen/Thumb2/mve-vst3.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-vst3.ll
+++ llvm/test/CodeGen/Thumb2/mve-vst3.ll
@@ -24,8 +24,8 @@
 ; CHECK-NEXT:    vmov.f32 s9, s6
 ; CHECK-NEXT:    vmov.f32 s10, s0
 ; CHECK-NEXT:    vmov.f32 s11, s5
-; CHECK-NEXT:    strd r2, r0, [r1, #16]
 ; CHECK-NEXT:    vstrw.32 q2, [r1]
+; CHECK-NEXT:    strd r2, r0, [r1, #16]
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
   %s1 = getelementptr <2 x i32>, <2 x i32>* %src, i32 0
Index: llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll
+++ llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll
@@ -8,17 +8,17 @@
 ; THUMBV7-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
 ; THUMBV7-NEXT:    .pad #44
 ; THUMBV7-NEXT:    sub sp, #44
-; THUMBV7-NEXT:    ldrd r4, r7, [sp, #88]
-; THUMBV7-NEXT:    mov r5, r3
 ; THUMBV7-NEXT:    str r0, [sp, #40] @ 4-byte Spill
 ; THUMBV7-NEXT:    movs r0, #0
-; THUMBV7-NEXT:    strd r4, r7, [sp]
-; THUMBV7-NEXT:    mov r1, r3
+; THUMBV7-NEXT:    ldrd r4, r7, [sp, #88]
+; THUMBV7-NEXT:    mov r5, r3
 ; THUMBV7-NEXT:    strd r0, r0, [sp, #8]
+; THUMBV7-NEXT:    mov r1, r3
 ; THUMBV7-NEXT:    mov r6, r2
 ; THUMBV7-NEXT:    mov r0, r2
 ; THUMBV7-NEXT:    movs r2, #0
 ; THUMBV7-NEXT:    movs r3, #0
+; THUMBV7-NEXT:    strd r4, r7, [sp]
 ; THUMBV7-NEXT:    bl __multi3
 ; THUMBV7-NEXT:    strd r1, r0, [sp, #32] @ 8-byte Folded Spill
 ; THUMBV7-NEXT:    strd r3, r2, [sp, #24] @ 8-byte Folded Spill
Index: llvm/test/CodeGen/X86/store_op_load_fold2.ll
===================================================================
--- llvm/test/CodeGen/X86/store_op_load_fold2.ll
+++ llvm/test/CodeGen/X86/store_op_load_fold2.ll
@@ -17,13 +17,12 @@
         store i64 %tmp2676.us.us, i64* %tmp2666
         ret i32 0
 
-; INTEL: and {{e..}}, dword ptr [356]
 ; INTEL: and dword ptr [360], {{e..}}
-; FIXME: mov dword ptr [356], {{e..}}
-; The above line comes out as 'mov 360, eax', but when the register is ecx it works?
+; INTEL: and {{e..}}, dword ptr [356]
+; INTEL: mov dword ptr [356], {{e..}}
 
-; ATT: andl 356, %{{e..}}
 ; ATT: andl %{{e..}}, 360
+; ATT: andl 356, %{{e..}}
 ; ATT: movl %{{e..}}, 356
 }