Index: llvm/lib/CodeGen/LiveInterval.cpp
===================================================================
--- llvm/lib/CodeGen/LiveInterval.cpp
+++ llvm/lib/CodeGen/LiveInterval.cpp
@@ -445,7 +445,7 @@
   while (true) {
     // J has just been advanced to satisfy:
-    assert(J->end >= I->start);
+    assert(J->end > I->start);
     // Check for an overlap.
     if (J->start < I->end) {
       // I and J are overlapping. Find the later start.
@@ -460,11 +460,11 @@
       std::swap(I, J);
       std::swap(IE, JE);
     }
-    // Advance J until J->end >= I->start.
+    // Advance J until J->end > I->start.
     do
       if (++J == JE)
         return false;
-    while (J->end < I->start);
+    while (J->end <= I->start);
   }
 }
Index: llvm/test/CodeGen/AArch64/copy-sink.ll
===================================================================
--- llvm/test/CodeGen/AArch64/copy-sink.ll
+++ llvm/test/CodeGen/AArch64/copy-sink.ll
@@ -147,15 +147,14 @@
 ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov x8, x1
 ; CHECK-NEXT: tbz w0, #0, .LBB4_2
 ; CHECK-NEXT: // %bb.1: // %if.then
-; CHECK-NEXT: add x0, x8, x2
+; CHECK-NEXT: add x0, x1, x2
 ; CHECK-NEXT: bl use
 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT: ret
 ; CHECK-NEXT: .LBB4_2: // %if.else
-; CHECK-NEXT: add x1, x8, x2
+; CHECK-NEXT: add x1, x1, x2
 ; CHECK-NEXT: mov w0, #1
 ; CHECK-NEXT: bl use
 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
Index: llvm/test/CodeGen/ARM/neon-copy.ll
===================================================================
--- llvm/test/CodeGen/ARM/neon-copy.ll
+++ llvm/test/CodeGen/ARM/neon-copy.ll
@@ -612,11 +612,11 @@
 define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
 ; CHECK: @ %bb.0:
-; CHECK-NEXT: vorr q9, q1, q1
-; CHECK-NEXT: vldr d20, .LCPI53_0
-; CHECK-NEXT: vorr q8, q0, q0
-; CHECK-NEXT: vtbl.8 d18, {d17, d18}, d20
-; CHECK-NEXT: vorr q0, q9, q9
+; CHECK-NEXT: @ kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: vldr d16, .LCPI53_0
+; CHECK-NEXT: @ kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: vtbl.8 d2, {d1, d2}, d16
+; CHECK-NEXT: vorr q0, q1, q1
 ; CHECK-NEXT: bx lr
 ; CHECK-NEXT: .p2align 3
 ; CHECK-NEXT: @ %bb.1:
Index: llvm/test/CodeGen/BPF/sockex2.ll
===================================================================
--- llvm/test/CodeGen/BPF/sockex2.ll
+++ llvm/test/CodeGen/BPF/sockex2.ll
@@ -311,7 +311,7 @@
 ; CHECK-LABEL: bpf_prog2:
 ; CHECK: r0 = *(u16 *)skb[12] # encoding: [0x28,0x00,0x00,0x00,0x0c,0x00,0x00,0x00]
 ; CHECK: r0 = *(u16 *)skb[16] # encoding: [0x28,0x00,0x00,0x00,0x10,0x00,0x00,0x00]
-; CHECK: implicit-def: $r1
+; CHECK: implicit-def: $r8
 ; CHECK: r1 =
 ; CHECK: call 1 # encoding: [0x85,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
 ; CHECK: call 2 # encoding: [0x85,0x00,0x00,0x00,0x02,0x00,0x00,0x00]
Index: llvm/test/CodeGen/X86/muloti.ll
===================================================================
--- llvm/test/CodeGen/X86/muloti.ll
+++ llvm/test/CodeGen/X86/muloti.ll
@@ -15,12 +15,12 @@
 ; CHECK-NEXT: .cfi_offset %r14, -16
 ; CHECK-NEXT: movq %rdx, %r11
 ; CHECK-NEXT: movq %rdi, %r10
-; CHECK-NEXT: movq %rsi, %rdi
-; CHECK-NEXT: sarq $63, %rdi
+; CHECK-NEXT: movq %rsi, %rdx
+; CHECK-NEXT: sarq $63, %rdx
 ; CHECK-NEXT: movq %rcx, %r8
-; CHECK-NEXT: imulq %rdi, %r8
-; CHECK-NEXT: movq %rdx, %rax
-; CHECK-NEXT: mulq %rdi
+; CHECK-NEXT: imulq %rdx, %r8
+; CHECK-NEXT: movq %r11, %rax
+; CHECK-NEXT: mulq %rdx
 ; CHECK-NEXT: movq %rdx, %rdi
 ; CHECK-NEXT: movq %rax, %rbx
 ; CHECK-NEXT: addq %rax, %rdi
Index: llvm/test/CodeGen/X86/smulo-128-legalisation-lowering.ll
===================================================================
--- llvm/test/CodeGen/X86/smulo-128-legalisation-lowering.ll
+++ llvm/test/CodeGen/X86/smulo-128-legalisation-lowering.ll
@@ -16,12 +16,12 @@
 ; X64-NEXT: .cfi_offset %r15, -16
 ; X64-NEXT: movq %rdx, %rbx
 ; X64-NEXT: movq %rdi, %r11
-; X64-NEXT: movq %rsi, %rdi
-; X64-NEXT: sarq $63, %rdi
+; X64-NEXT: movq %rsi, %rdx
+; X64-NEXT: sarq $63, %rdx
 ; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: imulq %rdi, %r9
-; X64-NEXT: movq %rdx, %rax
-; X64-NEXT: mulq %rdi
+; X64-NEXT: imulq %rdx, %r9
+; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: mulq %rdx
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, %r14
 ; X64-NEXT: addq %rax, %rdi
Index: llvm/test/CodeGen/X86/vec_smulo.ll
===================================================================
--- llvm/test/CodeGen/X86/vec_smulo.ll
+++ llvm/test/CodeGen/X86/vec_smulo.ll
@@ -3310,12 +3310,12 @@
 ; SSE2-NEXT: movq %rdi, %r10
 ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rsi
 ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbp
-; SSE2-NEXT: movq %r11, %rdi
-; SSE2-NEXT: sarq $63, %rdi
+; SSE2-NEXT: movq %r11, %rdx
+; SSE2-NEXT: sarq $63, %rdx
 ; SSE2-NEXT: movq %r9, %rbx
-; SSE2-NEXT: imulq %rdi, %rbx
+; SSE2-NEXT: imulq %rdx, %rbx
 ; SSE2-NEXT: movq %r15, %rax
-; SSE2-NEXT: mulq %rdi
+; SSE2-NEXT: mulq %rdx
 ; SSE2-NEXT: movq %rdx, %rdi
 ; SSE2-NEXT: movq %rax, %r12
 ; SSE2-NEXT: addq %rax, %rdi
@@ -3363,12 +3363,12 @@
 ; SSE2-NEXT: xorl %r15d, %r15d
 ; SSE2-NEXT: orq %rdx, %r10
 ; SSE2-NEXT: setne %r15b
-; SSE2-NEXT: movq %rcx, %r9
-; SSE2-NEXT: sarq $63, %r9
+; SSE2-NEXT: movq %rcx, %rdx
+; SSE2-NEXT: sarq $63, %rdx
 ; SSE2-NEXT: movq %rbp, %r11
-; SSE2-NEXT: imulq %r9, %r11
+; SSE2-NEXT: imulq %rdx, %r11
 ; SSE2-NEXT: movq %rsi, %rax
-; SSE2-NEXT: mulq %r9
+; SSE2-NEXT: mulq %rdx
 ; SSE2-NEXT: movq %rdx, %r9
 ; SSE2-NEXT: movq %rax, %r10
 ; SSE2-NEXT: addq %rax, %r9
@@ -3444,12 +3444,12 @@
 ; SSSE3-NEXT: movq %rdi, %r10
 ; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rsi
 ; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rbp
-; SSSE3-NEXT: movq %r11, %rdi
-; SSSE3-NEXT: sarq $63, %rdi
+; SSSE3-NEXT: movq %r11, %rdx
+; SSSE3-NEXT: sarq $63, %rdx
 ; SSSE3-NEXT: movq %r9, %rbx
-; SSSE3-NEXT: imulq %rdi, %rbx
+; SSSE3-NEXT: imulq %rdx, %rbx
 ; SSSE3-NEXT: movq %r15, %rax
-; SSSE3-NEXT: mulq %rdi
+; SSSE3-NEXT: mulq %rdx
 ; SSSE3-NEXT: movq %rdx, %rdi
 ; SSSE3-NEXT: movq %rax, %r12
 ; SSSE3-NEXT: addq %rax, %rdi
@@ -3497,12 +3497,12 @@
 ; SSSE3-NEXT: xorl %r15d, %r15d
 ; SSSE3-NEXT: orq %rdx, %r10
 ; SSSE3-NEXT: setne %r15b
-; SSSE3-NEXT: movq %rcx, %r9
-; SSSE3-NEXT: sarq $63, %r9
+; SSSE3-NEXT: movq %rcx, %rdx
+; SSSE3-NEXT: sarq $63, %rdx
 ; SSSE3-NEXT: movq %rbp, %r11
-; SSSE3-NEXT: imulq %r9, %r11
+; SSSE3-NEXT: imulq %rdx, %r11
 ; SSSE3-NEXT: movq %rsi, %rax
-; SSSE3-NEXT: mulq %r9
+; SSSE3-NEXT: mulq %rdx
 ; SSSE3-NEXT: movq %rdx, %r9
 ; SSSE3-NEXT: movq %rax, %r10
 ; SSSE3-NEXT: addq %rax, %r9
@@ -3578,12 +3578,12 @@
 ; SSE41-NEXT: movq %rdi, %r10
 ; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rsi
 ; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rbp
-; SSE41-NEXT: movq %r11, %rdi
-; SSE41-NEXT: sarq $63, %rdi
+; SSE41-NEXT: movq %r11, %rdx
+; SSE41-NEXT: sarq $63, %rdx
 ; SSE41-NEXT: movq %r9, %rbx
-; SSE41-NEXT: imulq %rdi, %rbx
+; SSE41-NEXT: imulq %rdx, %rbx
 ; SSE41-NEXT: movq %r15, %rax
-; SSE41-NEXT: mulq %rdi
+; SSE41-NEXT: mulq %rdx
 ; SSE41-NEXT: movq %rdx, %rdi
 ; SSE41-NEXT: movq %rax, %r12
 ; SSE41-NEXT: addq %rax, %rdi
@@ -3631,12 +3631,12 @@
 ; SSE41-NEXT: xorl %r15d, %r15d
 ; SSE41-NEXT: orq %rdx, %r10
 ; SSE41-NEXT: setne %r15b
-; SSE41-NEXT: movq %rcx, %r9
-; SSE41-NEXT: sarq $63, %r9
+; SSE41-NEXT: movq %rcx, %rdx
+; SSE41-NEXT: sarq $63, %rdx
 ; SSE41-NEXT: movq %rbp, %r11
-; SSE41-NEXT: imulq %r9, %r11
+; SSE41-NEXT: imulq %rdx, %r11
 ; SSE41-NEXT: movq %rsi, %rax
-; SSE41-NEXT: mulq %r9
+; SSE41-NEXT: mulq %rdx
 ; SSE41-NEXT: movq %rdx, %r9
 ; SSE41-NEXT: movq %rax, %r10
 ; SSE41-NEXT: addq %rax, %r9
@@ -3711,12 +3711,12 @@
 ; AVX-NEXT: movq %rdi, %r10
 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rsi
 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rbp
-; AVX-NEXT: movq %r11, %rdi
-; AVX-NEXT: sarq $63, %rdi
+; AVX-NEXT: movq %r11, %rdx
+; AVX-NEXT: sarq $63, %rdx
 ; AVX-NEXT: movq %r9, %rbx
-; AVX-NEXT: imulq %rdi, %rbx
+; AVX-NEXT: imulq %rdx, %rbx
 ; AVX-NEXT: movq %r15, %rax
-; AVX-NEXT: mulq %rdi
+; AVX-NEXT: mulq %rdx
 ; AVX-NEXT: movq %rdx, %rdi
 ; AVX-NEXT: movq %rax, %r12
 ; AVX-NEXT: addq %rax, %rdi
@@ -3764,12 +3764,12 @@
 ; AVX-NEXT: xorl %r15d, %r15d
 ; AVX-NEXT: orq %rdx, %r10
 ; AVX-NEXT: setne %r15b
-; AVX-NEXT: movq %rcx, %r9
-; AVX-NEXT: sarq $63, %r9
+; AVX-NEXT: movq %rcx, %rdx
+; AVX-NEXT: sarq $63, %rdx
 ; AVX-NEXT: movq %rbp, %r11
-; AVX-NEXT: imulq %r9, %r11
+; AVX-NEXT: imulq %rdx, %r11
 ; AVX-NEXT: movq %rsi, %rax
-; AVX-NEXT: mulq %r9
+; AVX-NEXT: mulq %rdx
 ; AVX-NEXT: movq %rdx, %r9
 ; AVX-NEXT: movq %rax, %r10
 ; AVX-NEXT: addq %rax, %r9
@@ -3896,16 +3896,16 @@
 ; AVX512F-NEXT: orq %rdx, %r10
 ; AVX512F-NEXT: setne %al
 ; AVX512F-NEXT: kmovw %eax, %k0
-; AVX512F-NEXT: movq %r9, %rsi
-; AVX512F-NEXT: sarq $63, %rsi
-; AVX512F-NEXT: movq %rbp, %rbx
-; AVX512F-NEXT: imulq %rsi, %rbx
+; AVX512F-NEXT: movq %r9, %rdx
+; AVX512F-NEXT: sarq $63, %rdx
+; AVX512F-NEXT: movq %rbp, %rsi
+; AVX512F-NEXT: imulq %rdx, %rsi
 ; AVX512F-NEXT: movq %r8, %rax
-; AVX512F-NEXT: mulq %rsi
+; AVX512F-NEXT: mulq %rdx
 ; AVX512F-NEXT: movq %rdx, %r10
 ; AVX512F-NEXT: movq %rax, %r11
 ; AVX512F-NEXT: addq %rax, %r10
-; AVX512F-NEXT: addq %rbx, %r10
+; AVX512F-NEXT: addq %rsi, %r10
 ; AVX512F-NEXT: movq %rbp, %rax
 ; AVX512F-NEXT: sarq $63, %rax
 ; AVX512F-NEXT: movq %rax, %rsi
@@ -4029,16 +4029,16 @@
 ; AVX512BW-NEXT: orq %rdx, %r10
 ; AVX512BW-NEXT: setne %al
 ; AVX512BW-NEXT: kmovd %eax, %k0
-; AVX512BW-NEXT: movq %r9, %rsi
-; AVX512BW-NEXT: sarq $63, %rsi
-; AVX512BW-NEXT: movq %rbp, %rbx
-; AVX512BW-NEXT: imulq %rsi, %rbx
+; AVX512BW-NEXT: movq %r9, %rdx
+; AVX512BW-NEXT: sarq $63, %rdx
+; AVX512BW-NEXT: movq %rbp, %rsi
+; AVX512BW-NEXT: imulq %rdx, %rsi
 ; AVX512BW-NEXT: movq %r8, %rax
-; AVX512BW-NEXT: mulq %rsi
+; AVX512BW-NEXT: mulq %rdx
 ; AVX512BW-NEXT: movq %rdx, %r10
 ; AVX512BW-NEXT: movq %rax, %r11
 ; AVX512BW-NEXT: addq %rax, %r10
-; AVX512BW-NEXT: addq %rbx, %r10
+; AVX512BW-NEXT: addq %rsi, %r10
 ; AVX512BW-NEXT: movq %rbp, %rax
 ; AVX512BW-NEXT: sarq $63, %rax
 ; AVX512BW-NEXT: movq %rax, %rsi
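
For context, the LiveInterval.cpp hunk above tightens the overlap-scan invariant from J->end >= I->start to the strict J->end > I->start: with half-open segments [start, end), a segment that ends exactly where the other begins only touches it, so the advance loop now skips such pairs instead of handing them to the overlap check. Below is a minimal standalone sketch of that two-pointer scan under those assumptions; the Range and overlaps names are hypothetical illustrations, not code from the patch.

// Illustrative sketch only; not part of the patch above. A minimal
// two-pointer overlap scan over sorted, internally disjoint lists of
// half-open ranges [start, end), mirroring the strict invariant the
// LiveInterval.cpp hunk asserts. Range/overlaps are hypothetical names.
#include <cassert>
#include <utility>
#include <vector>

struct Range {
  int start, end; // half-open: [start, end)
};

static bool overlaps(const std::vector<Range> &A, const std::vector<Range> &B) {
  auto I = A.begin(), IE = A.end();
  auto J = B.begin(), JE = B.end();
  if (I == IE || J == JE)
    return false;
  // Establish the loop invariant before entering: J->end > I->start.
  while (J->end <= I->start)
    if (++J == JE)
      return false;
  while (true) {
    // J has just been advanced to satisfy J->end > I->start, so the two
    // ranges intersect iff J also starts before I ends.
    if (J->start < I->end)
      return true;
    // Advance whichever range ends first; swap so J is the one advanced.
    if (J->end > I->end) {
      std::swap(I, J);
      std::swap(IE, JE);
    }
    // Advance J until J->end > I->start (strict: touching is not overlap).
    do
      if (++J == JE)
        return false;
    while (J->end <= I->start);
  }
}

int main() {
  assert(!overlaps({{0, 2}, {5, 7}}, {{2, 4}})); // [0,2) and [2,4) only touch
  assert(overlaps({{0, 2}, {5, 7}}, {{2, 6}}));  // [5,7) and [2,6) intersect
  return 0;
}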