Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16088,6 +16088,16 @@
   if (Op1->isInvariant() && Op0->writeMem())
     return false;
 
+  unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
+  unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
+
+  // Check for BaseIndexOffset matching.
+  BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0->getBasePtr(), DAG);
+  BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1->getBasePtr(), DAG);
+  if (BasePtr0.equalBaseIndex(BasePtr1))
+    return !((BasePtr0.Offset + NumBytes0 <= BasePtr1.Offset) ||
+             (BasePtr1.Offset + NumBytes1 <= BasePtr0.Offset));
+
   // Gather base node and offset information.
   SDValue Base0, Base1;
   int64_t Offset0, Offset1;
@@ -16099,8 +16109,6 @@
                                       Base1, Offset1, GV1, CV1);
 
   // If they have the same base address, then check to see if they overlap.
-  unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
-  unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
   if (Base0 == Base1 || (GV0 && (GV0 == GV1)) || (CV0 && (CV0 == CV1)))
     return !((Offset0 + NumBytes0) <= Offset1 ||
              (Offset1 + NumBytes1) <= Offset0);
Index: test/CodeGen/X86/i256-add.ll
===================================================================
--- test/CodeGen/X86/i256-add.ll
+++ test/CodeGen/X86/i256-add.ll
@@ -12,34 +12,35 @@
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl 8(%ecx), %edx
-; X32-NEXT:    movl (%ecx), %ebx
-; X32-NEXT:    movl 4(%ecx), %edi
+; X32-NEXT:    movl 8(%ecx), %edi
+; X32-NEXT:    movl (%ecx), %edx
+; X32-NEXT:    movl 4(%ecx), %ebx
 ; X32-NEXT:    movl 28(%eax), %esi
 ; X32-NEXT:    movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT:    movl 24(%eax), %ebp
-; X32-NEXT:    addl (%eax), %ebx
-; X32-NEXT:    adcl 4(%eax), %edi
-; X32-NEXT:    adcl 8(%eax), %edx
+; X32-NEXT:    addl (%eax), %edx
 ; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT:    movl 20(%eax), %esi
+; X32-NEXT:    adcl 4(%eax), %ebx
+; X32-NEXT:    adcl 8(%eax), %edi
+; X32-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X32-NEXT:    movl 20(%eax), %edi
 ; X32-NEXT:    movl 12(%eax), %edx
-; X32-NEXT:    movl 16(%eax), %eax
+; X32-NEXT:    movl 16(%eax), %esi
 ; X32-NEXT:    adcl 12(%ecx), %edx
-; X32-NEXT:    adcl 16(%ecx), %eax
-; X32-NEXT:    adcl 20(%ecx), %esi
-; X32-NEXT:    adcl 24(%ecx), %ebp
-; X32-NEXT:    movl %ebp, (%esp) # 4-byte Spill
+; X32-NEXT:    adcl 16(%ecx), %esi
+; X32-NEXT:    adcl 20(%ecx), %edi
+; X32-NEXT:    movl %ebp, %eax
+; X32-NEXT:    adcl 24(%ecx), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebp # 4-byte Reload
 ; X32-NEXT:    adcl %ebp, 28(%ecx)
+; X32-NEXT:    movl (%esp), %ebp # 4-byte Reload
+; X32-NEXT:    movl %ebp, 8(%ecx)
+; X32-NEXT:    movl %ebx, 4(%ecx)
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT:    movl %ebx, (%ecx)
-; X32-NEXT:    movl %edi, 4(%ecx)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT:    movl %edi, 8(%ecx)
 ; X32-NEXT:    movl %edx, 12(%ecx)
-; X32-NEXT:    movl %eax, 16(%ecx)
-; X32-NEXT:    movl %esi, 20(%ecx)
-; X32-NEXT:    movl (%esp), %eax # 4-byte Reload
+; X32-NEXT:    movl %esi, 16(%ecx)
+; X32-NEXT:    movl %edi, 20(%ecx)
 ; X32-NEXT:    movl %eax, 24(%ecx)
 ; X32-NEXT:    addl $12, %esp
 ; X32-NEXT:    popl %esi
@@ -58,9 +59,9 @@
 ; X64-NEXT:    adcq 8(%rsi), %rdx
 ; X64-NEXT:    adcq 16(%rsi), %rax
 ; X64-NEXT:    adcq %r8, 24(%rdi)
-; X64-NEXT:    movq %rcx, (%rdi)
-; X64-NEXT:    movq %rdx, 8(%rdi)
 ; X64-NEXT:    movq %rax, 16(%rdi)
+; X64-NEXT:    movq %rdx, 8(%rdi)
+; X64-NEXT:    movq %rcx, (%rdi)
 ; X64-NEXT:    retq
   %a = load i256, i256* %p
   %b = load i256, i256* %q
@@ -96,9 +97,9 @@
 ; X32-NEXT:    sbbl 24(%esi), %eax
 ; X32-NEXT:    movl 28(%esi), %esi
 ; X32-NEXT:    sbbl %esi, 28(%ecx)
-; X32-NEXT:    movl %ebx, (%ecx)
-; X32-NEXT:    movl %ebp, 4(%ecx)
 ; X32-NEXT:    movl %edi, 8(%ecx)
+; X32-NEXT:    movl %ebp, 4(%ecx)
+; X32-NEXT:    movl %ebx, (%ecx)
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
 ; X32-NEXT:    movl %esi, 12(%ecx)
 ; X32-NEXT:    movl (%esp), %esi # 4-byte Reload
@@ -122,9 +123,9 @@
 ; X64-NEXT:    sbbq 8(%rsi), %rdx
 ; X64-NEXT:    sbbq 16(%rsi), %rax
 ; X64-NEXT:    sbbq %r8, 24(%rdi)
-; X64-NEXT:    movq %rcx, (%rdi)
-; X64-NEXT:    movq %rdx, 8(%rdi)
 ; X64-NEXT:    movq %rax, 16(%rdi)
+; X64-NEXT:    movq %rdx, 8(%rdi)
+; X64-NEXT:    movq %rcx, (%rdi)
 ; X64-NEXT:    retq
   %a = load i256, i256* %p
   %b = load i256, i256* %q
Index: test/CodeGen/X86/merge_store.ll
===================================================================
--- test/CodeGen/X86/merge_store.ll
+++ test/CodeGen/X86/merge_store.ll
@@ -29,17 +29,8 @@
   ret void
 }
 
-
-
 ;; CHECK-LABEL: indexed-store-merge
-
-;; We should be able to merge the 4 consecutive stores.
-;; FIXMECHECK: movl $0, 2(%rsi,%rdi)
-
-;; CHECK: movb $0, 2(%rsi,%rdi)
-;; CHECK: movb $0, 3(%rsi,%rdi)
-;; CHECK: movb $0, 4(%rsi,%rdi)
-;; CHECK: movb $0, 5(%rsi,%rdi)
+;; CHECK: movl $0, 2(%rsi,%rdi)
 ;; CHECK: movb $0, (%rsi)
 define void @indexed-store-merge(i64 %p, i8* %v) {
 entry:
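Note for reviewers: the new DAGCombiner early-out is a plain interval-disjointness test. Once BaseIndexOffset::match decomposes both pointers and equalBaseIndex proves they share the same (base, index) pair, the two accesses can alias only if the byte ranges [Offset, Offset + NumBytes) intersect. Below is a minimal standalone C++ sketch of that test; MemAccess and mayAlias are hypothetical names used for illustration, not the LLVM API.

// Standalone sketch of the disjoint-interval alias test added above.
// MemAccess and mayAlias are illustrative stand-ins, not LLVM types.
#include <cassert>
#include <cstdint>

struct MemAccess {
  int64_t Offset;    // byte offset from the common (base, index) pair
  uint64_t NumBytes; // width of the access in bytes
};

// Accesses with an equal (base, index) pair may alias unless the byte
// ranges [Offset, Offset + NumBytes) are provably disjoint -- the same
// test the patch performs on BasePtr0/BasePtr1.
bool mayAlias(const MemAccess &A, const MemAccess &B) {
  return !(A.Offset + (int64_t)A.NumBytes <= B.Offset ||
           B.Offset + (int64_t)B.NumBytes <= A.Offset);
}

int main() {
  // Adjacent 8-byte stores, e.g. (%rdi) and 8(%rdi): provably disjoint,
  // so the stores in i256-add.ll may be emitted in either order.
  assert(!mayAlias({0, 8}, {8, 8}));
  // Overlapping ranges must conservatively report aliasing.
  assert(mayAlias({0, 8}, {4, 8}));
  return 0;
}

The same reasoning is what lets the four adjacent byte stores in merge_store.ll be merged into the single movl that the updated CHECK line now expects.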