Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -8024,7 +8024,7 @@
       DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
     if (DoXform) {
       LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT,
+      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT,
                                        LN0->getChain(), LN0->getBasePtr(),
                                        N0.getValueType(),
                                        LN0->getMemOperand());
Index: test/CodeGen/AArch64/arm64-aapcs.ll
===================================================================
--- test/CodeGen/AArch64/arm64-aapcs.ll
+++ test/CodeGen/AArch64/arm64-aapcs.ll
@@ -24,36 +24,35 @@
 @var64 = global i64 0, align 8

- ; Check stack slots are 64-bit at all times.
+; Check stack slots are 64-bit at all times.
 define void @test_stack_slots([8 x i32], i1 %bool, i8 %char, i16 %short, i32 %int, i64 %long) {
+; CHECK-LABEL: test_stack_slots:
+; CHECK-DAG: ldr w[[ext1:[0-9]+]], [sp, #24]
+; CHECK-DAG: ldrh w[[ext2:[0-9]+]], [sp, #16]
+; CHECK-DAG: ldrb w[[ext3:[0-9]+]], [sp, #8]
+; CHECK-DAG: ldr x[[ext4:[0-9]+]], [sp, #32]
+; CHECK-DAG: ldrb w[[ext5:[0-9]+]], [sp]
+; CHECK-DAG: and x[[ext5]], x[[ext5]], #0x1
+
   %ext_bool = zext i1 %bool to i64
   store volatile i64 %ext_bool, i64* @var64, align 8
- ; Part of last store. Blasted scheduler.
-; CHECK: ldr [[LONG:x[0-9]+]], [sp, #32]
-
-; CHECK: ldrb w[[EXT:[0-9]+]], [sp]
-
-; CHECK: and x[[EXTED:[0-9]+]], x[[EXT]], #0x1
-; CHECK: str x[[EXTED]], [{{x[0-9]+}}, :lo12:var64]
+; CHECK: str x[[ext5]], [{{x[0-9]+}}, :lo12:var64]

   %ext_char = zext i8 %char to i64
   store volatile i64 %ext_char, i64* @var64, align 8
-; CHECK: ldrb w[[EXT:[0-9]+]], [sp, #8]
-; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
+; CHECK: str x[[ext3]], [{{x[0-9]+}}, :lo12:var64]

   %ext_short = zext i16 %short to i64
   store volatile i64 %ext_short, i64* @var64, align 8
-; CHECK: ldrh w[[EXT:[0-9]+]], [sp, #16]
-; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
+; CHECK: str x[[ext2]], [{{x[0-9]+}}, :lo12:var64]

   %ext_int = zext i32 %int to i64
   store volatile i64 %ext_int, i64* @var64, align 8
-; CHECK: ldr{{b?}} w[[EXT:[0-9]+]], [sp, #24]
-; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
+; CHECK: str x[[ext1]], [{{x[0-9]+}}, :lo12:var64]

   store volatile i64 %long, i64* @var64, align 8
-; CHECK: str [[LONG]], [{{x[0-9]+}}, :lo12:var64]
+; CHECK: str x[[ext4]], [{{x[0-9]+}}, :lo12:var64]

   ret void
 }
Index: test/CodeGen/AArch64/arm64-ldp-cluster.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -67,14 +67,14 @@
 ; Test sext + zext clustering.
; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0 -; CHECK: Cluster ld/st SU(3) - SU(4) -; CHECK: SU(3): %{{[0-9]+}}:gpr64 = LDRSWui -; CHECK: SU(4): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui +; CHECK: Cluster ld/st SU(4) - SU(3) +; CHECK: SU(3): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui +; CHECK: SU(4): %{{[0-9]+}}:gpr64 = LDRSWui ; EXYNOSM1: ********** MI Scheduling ********** ; EXYNOSM1-LABEL: ldp_half_sext_zext_int:%bb.0 -; EXYNOSM1: Cluster ld/st SU(3) - SU(4) -; EXYNOSM1: SU(3): %{{[0-9]+}}:gpr64 = LDRSWui -; EXYNOSM1: SU(4): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui +; EXYNOSM1: Cluster ld/st SU(4) - SU(3) +; EXYNOSM1: SU(3): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui +; EXYNOSM1: SU(4): %{{[0-9]+}}:gpr64 = LDRSWui define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind { %tmp0 = load i64, i64* %q, align 4 %tmp = load i32, i32* %p, align 4 Index: test/CodeGen/ARM/vector-load.ll =================================================================== --- test/CodeGen/ARM/vector-load.ll +++ test/CodeGen/ARM/vector-load.ll @@ -240,9 +240,9 @@ ;CHECK-LABEL: zextload_v8i8tov8i32_fake_update: ;CHECK: ldr r[[PTRREG:[0-9]+]], [r0] ;CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r[[PTRREG]]:32] -;CHECK: add.w r[[INCREG:[0-9]+]], r[[PTRREG]], #16 ;CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}} ;CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}} +;CHECK: add.w r[[INCREG:[0-9]+]], r[[PTRREG]], #16 ;CHECK: str r[[INCREG]], [r0] %A = load <4 x i8>*, <4 x i8>** %ptr %lA = load <4 x i8>, <4 x i8>* %A, align 4 Index: test/CodeGen/X86/avg.ll =================================================================== --- test/CodeGen/X86/avg.ll +++ test/CodeGen/X86/avg.ll @@ -293,14 +293,14 @@ ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm5[1,1,2,3] ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero -; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero -; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero ; AVX1-NEXT: vpaddd %xmm5, %xmm7, %xmm5 -; AVX1-NEXT: vmovdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[3,1,2,3] ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm9 @@ -328,11 +328,11 @@ ; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero -; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm2, %xmm2 # 16-byte Folded Reload +; AVX1-NEXT: vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = 
xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload +; AVX1-NEXT: vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload ; AVX1-NEXT: vpcmpeqd %xmm7, %xmm7, %xmm7 -; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX1-NEXT: vpsubd %xmm7, %xmm1, %xmm10 ; AVX1-NEXT: vpsubd %xmm7, %xmm9, %xmm9 ; AVX1-NEXT: vpsubd %xmm7, %xmm8, %xmm8 @@ -1747,14 +1747,14 @@ ; AVX1-NEXT: vmovdqa 304(%rbp), %ymm15 ; AVX1-NEXT: vpavgb %xmm13, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill +; AVX1-NEXT: vmovaps %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm14 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0 ; AVX1-NEXT: vpavgb %xmm14, %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa 336(%rbp), %ymm14 ; AVX1-NEXT: vpavgb %xmm15, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill +; AVX1-NEXT: vmovaps %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX1-NEXT: vextractf128 $1, %ymm14, %xmm0 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1 ; AVX1-NEXT: vpavgb %xmm0, %xmm1, %xmm0 @@ -1857,9 +1857,9 @@ ; AVX1-NEXT: vmovaps %ymm3, 96(%rdi) ; AVX1-NEXT: vmovaps (%rsp), %ymm0 # 32-byte Reload ; AVX1-NEXT: vmovaps %ymm0, 64(%rdi) -; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX1-NEXT: vmovaps %ymm0, 32(%rdi) -; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX1-NEXT: vmovaps %ymm0, (%rdi) ; AVX1-NEXT: movq %rdi, %rax ; AVX1-NEXT: movq %rbp, %rsp @@ -2022,13 +2022,13 @@ ; SSE2-NEXT: movaps (%rsi), %xmm0 ; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r12d @@ -2067,19 +2067,19 @@ ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx ; SSE2-NEXT: leaq -1(%r14,%rbx), %r14 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload ; SSE2-NEXT: leaq -1(%rdi,%rbx), %rdi -; SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload ; SSE2-NEXT: leaq -1(%rdi,%rbx), %rbx -; SSE2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rbx, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload ; SSE2-NEXT: leaq -1(%rdi,%rbx), %rbx -; SSE2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload ; SSE2-NEXT: leaq -1(%r10,%rbx), %rbx ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movd %eax, %xmm8 @@ -2105,13 +2105,13 @@ ; SSE2-NEXT: movd %r15d, %xmm13 ; SSE2-NEXT: shrq %r14 ; SSE2-NEXT: movd %r14d, %xmm7 -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movd %eax, %xmm14 -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: shrq %rbx @@ -2172,15 +2172,15 @@ ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero ; AVX1-NEXT: vpextrq $1, %xmm2, %rdx -; AVX1-NEXT: vmovq %xmm2, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; AVX1-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX1-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vpextrq $1, %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX1-NEXT: vmovq %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero -; AVX1-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vpextrq $1, %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero @@ -2221,22 +2221,22 @@ ; AVX1-NEXT: addq %rdx, %rbp ; AVX1-NEXT: movq %rbp, %r8 ; AVX1-NEXT: vmovq %xmm3, %rbp -; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload -; AVX1-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload +; AVX1-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero ; AVX1-NEXT: vpextrq $1, %xmm2, %rdx -; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload +; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: vmovq %xmm2, %rdx -; AVX1-NEXT: 
addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload +; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero ; AVX1-NEXT: vpextrq $1, %xmm2, %rdx -; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload +; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: vmovq %xmm2, %r12 ; AVX1-NEXT: addq %rax, %r12 ; AVX1-NEXT: vpextrq $1, %xmm0, %rax @@ -2248,59 +2248,59 @@ ; AVX1-NEXT: vmovq %xmm1, %rdi ; AVX1-NEXT: addq %rax, %rdi ; AVX1-NEXT: addq $-1, %r9 -; AVX1-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax -; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: addq $-1, %r13 -; AVX1-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax -; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: addq $-1, %rcx ; AVX1-NEXT: movq %rcx, (%rsp) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax -; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: addq $-1, %r11 -; AVX1-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax -; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: addq $-1, %rsi -; AVX1-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax -; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: addq $-1, %rbx -; AVX1-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax -; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: addq $-1, %r15 -; AVX1-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %ebp ; AVX1-NEXT: adcq $-1, %rbp ; AVX1-NEXT: addq $-1, %r14 -; AVX1-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %r15d ; AVX1-NEXT: adcq $-1, %r15 ; AVX1-NEXT: addq $-1, %r8 -; AVX1-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: movl $0, %eax ; AVX1-NEXT: adcq $-1, %rax ; AVX1-NEXT: movq %rax, %rsi -; AVX1-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: addq 
$-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: movl $0, %r13d ; AVX1-NEXT: adcq $-1, %r13 -; AVX1-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: movl $0, %r14d ; AVX1-NEXT: adcq $-1, %r14 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX1-NEXT: addq $-1, %rdx ; AVX1-NEXT: movl $0, %r11d ; AVX1-NEXT: adcq $-1, %r11 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: addq $-1, %rax ; AVX1-NEXT: movl $0, %ebx ; AVX1-NEXT: adcq $-1, %rbx @@ -2314,39 +2314,39 @@ ; AVX1-NEXT: movl $0, %ecx ; AVX1-NEXT: adcq $-1, %rcx ; AVX1-NEXT: shldq $63, %rdi, %rcx -; AVX1-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: shldq $63, %r10, %r8 ; AVX1-NEXT: shldq $63, %r12, %r9 ; AVX1-NEXT: shldq $63, %rax, %rbx ; AVX1-NEXT: shldq $63, %rdx, %r11 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX1-NEXT: shldq $63, %rdx, %r14 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX1-NEXT: shldq $63, %rdx, %r13 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %rsi -; AVX1-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %r15 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %rbp -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %rsi -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %rcx -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %rdi -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload ; AVX1-NEXT: movq (%rsp), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %r12 -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rax, %r10 -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload -; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload +; 
AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shldq $63, %rdx, %rax ; AVX1-NEXT: vmovq %rax, %xmm8 ; AVX1-NEXT: vmovq %r10, %xmm0 @@ -2356,7 +2356,7 @@ ; AVX1-NEXT: vmovq %rsi, %xmm13 ; AVX1-NEXT: vmovq %rbp, %xmm14 ; AVX1-NEXT: vmovq %r15, %xmm15 -; AVX1-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm9 # 8-byte Folded Reload +; AVX1-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 8-byte Folded Reload ; AVX1-NEXT: # xmm9 = mem[0],zero ; AVX1-NEXT: vmovq %r13, %xmm10 ; AVX1-NEXT: vmovq %r14, %xmm12 @@ -2364,7 +2364,7 @@ ; AVX1-NEXT: vmovq %rbx, %xmm4 ; AVX1-NEXT: vmovq %r9, %xmm5 ; AVX1-NEXT: vmovq %r8, %xmm6 -; AVX1-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm7 # 8-byte Folded Reload +; AVX1-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 8-byte Folded Reload ; AVX1-NEXT: # xmm7 = mem[0],zero ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm0[0],xmm8[0] ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm11[0],xmm1[0] @@ -2408,147 +2408,146 @@ ; AVX2-NEXT: pushq %r12 ; AVX2-NEXT: pushq %rbx ; AVX2-NEXT: subq $16, %rsp +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX2-NEXT: vpextrq $1, %xmm4, %rbx +; AVX2-NEXT: vmovq %xmm4, %rbp +; AVX2-NEXT: vpextrq $1, %xmm3, %rdi +; AVX2-NEXT: vmovq %xmm3, %rcx +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rdx +; AVX2-NEXT: vmovq %xmm3, %r9 +; AVX2-NEXT: vpextrq $1, %xmm2, %r11 +; AVX2-NEXT: vmovq %xmm2, %r12 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX2-NEXT: vpextrq $1, %xmm3, %rcx -; AVX2-NEXT: vmovq %xmm3, %rax -; AVX2-NEXT: vpextrq $1, %xmm2, %rbx -; AVX2-NEXT: vmovq %xmm2, %rdx +; AVX2-NEXT: vpextrq $1, %xmm3, %r15 +; AVX2-NEXT: vmovq %xmm3, %rsi +; AVX2-NEXT: vpextrq $1, %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX2-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpextrq $1, %xmm2, %rdi -; AVX2-NEXT: vmovq %xmm2, %r11 -; AVX2-NEXT: vpextrq $1, %xmm1, %r13 -; AVX2-NEXT: vmovq %xmm1, %r12 -; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX2-NEXT: vextracti128 $1, 
%ymm1, %xmm2 -; AVX2-NEXT: vpextrq $1, %xmm2, %rbp -; AVX2-NEXT: vmovq %xmm2, %r10 -; AVX2-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX2-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX2-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX2-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 -; AVX2-NEXT: vpextrq $1, %xmm4, %r15 -; AVX2-NEXT: addq %rcx, %r15 -; AVX2-NEXT: vmovq %xmm4, %r9 -; AVX2-NEXT: addq %rax, %r9 -; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: vpextrq $1, %xmm4, %rax ; AVX2-NEXT: addq %rbx, %rax ; AVX2-NEXT: movq %rax, %rbx -; AVX2-NEXT: vmovq %xmm3, %rax -; AVX2-NEXT: addq %rdx, %rax -; AVX2-NEXT: movq %rax, %r8 +; AVX2-NEXT: vmovq %xmm4, %r13 +; AVX2-NEXT: addq %rbp, %r13 +; AVX2-NEXT: vpextrq $1, %xmm3, %r10 +; AVX2-NEXT: addq %rdi, %r10 +; AVX2-NEXT: vmovq %xmm3, %r14 +; AVX2-NEXT: addq %rcx, %r14 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 ; AVX2-NEXT: vpextrq $1, %xmm3, %rax -; AVX2-NEXT: addq %rdi, %rax +; AVX2-NEXT: addq %rdx, %rax ; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: vmovq %xmm3, %r8 +; AVX2-NEXT: addq %r9, %r8 +; AVX2-NEXT: vpextrq $1, %xmm2, %rax ; AVX2-NEXT: addq %r11, %rax ; AVX2-NEXT: movq %rax, %r11 -; AVX2-NEXT: vpextrq $1, %xmm2, %r14 -; AVX2-NEXT: addq %r13, %r14 ; AVX2-NEXT: vmovq %xmm2, %rax ; AVX2-NEXT: addq %r12, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 ; AVX2-NEXT: vpextrq $1, %xmm3, %rax -; AVX2-NEXT: addq %rbp, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq %r15, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm3, %rax -; AVX2-NEXT: addq %r10, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq %rsi, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: 
vpextrq $1, %xmm2, %rax -; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm2, %rax -; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 ; AVX2-NEXT: vpextrq $1, %xmm2, %rbp -; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload -; AVX2-NEXT: vmovq %xmm2, %r10 -; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload -; AVX2-NEXT: vpextrq $1, %xmm0, %rax -; AVX2-NEXT: vpextrq $1, %xmm1, %rdi +; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload +; AVX2-NEXT: vmovq %xmm2, %r9 +; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vpextrq $1, %xmm0, %rdi ; AVX2-NEXT: addq %rax, %rdi -; AVX2-NEXT: vmovq %xmm0, %rdx -; AVX2-NEXT: vmovq %xmm1, %rsi +; AVX2-NEXT: vmovq %xmm1, %rdx +; AVX2-NEXT: vmovq %xmm0, %rsi ; AVX2-NEXT: addq %rdx, %rsi -; AVX2-NEXT: addq $-1, %r15 -; AVX2-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, %rbx +; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: addq $-1, %r9 -; AVX2-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r13 +; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax ; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill -; AVX2-NEXT: addq $-1, %rbx -; AVX2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r10 +; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: addq $-1, %r8 -; AVX2-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r14 +; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %r13d ; AVX2-NEXT: adcq $-1, %r13 ; AVX2-NEXT: addq $-1, %rcx -; AVX2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: addq $-1, %r11 -; AVX2-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r8 +; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %r15d ; AVX2-NEXT: adcq $-1, %r15 -; AVX2-NEXT: addq $-1, %r14 -; AVX2-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: 
addq $-1, %r11 +; AVX2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %ebx ; AVX2-NEXT: adcq $-1, %rbx -; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: movl $0, %r8d ; AVX2-NEXT: adcq $-1, %r8 -; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: movl $0, %r12d ; AVX2-NEXT: adcq $-1, %r12 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: addq $-1, %rcx ; AVX2-NEXT: movl $0, %r11d ; AVX2-NEXT: adcq $-1, %r11 ; AVX2-NEXT: addq $-1, %rbp ; AVX2-NEXT: movl $0, %r14d ; AVX2-NEXT: adcq $-1, %r14 -; AVX2-NEXT: addq $-1, %r10 -; AVX2-NEXT: movl $0, %r9d -; AVX2-NEXT: adcq $-1, %r9 +; AVX2-NEXT: addq $-1, %r9 +; AVX2-NEXT: movl $0, %r10d +; AVX2-NEXT: adcq $-1, %r10 ; AVX2-NEXT: addq $-1, %rdi ; AVX2-NEXT: movl $0, %edx ; AVX2-NEXT: adcq $-1, %rdx @@ -2556,37 +2555,37 @@ ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax ; AVX2-NEXT: shldq $63, %rsi, %rax -; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: shldq $63, %rdi, %rdx -; AVX2-NEXT: shldq $63, %r10, %r9 +; AVX2-NEXT: shldq $63, %r9, %r10 ; AVX2-NEXT: shldq $63, %rbp, %r14 ; AVX2-NEXT: shldq $63, %rcx, %r11 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %r12 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX2-NEXT: shldq $63, %rcx, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; AVX2-NEXT: shldq $63, %rcx, %r10 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %r9 +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %r8 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: shldq $63, %rax, %rbx -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: shldq $63, %rax, %r15 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX2-NEXT: movq 
{{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %rax -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %r13 -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %rbp ; AVX2-NEXT: movq (%rsp), %rdi # 8-byte Reload -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %rdi -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %rsi ; AVX2-NEXT: vmovq %rsi, %xmm8 ; AVX2-NEXT: vmovq %rdi, %xmm9 @@ -2596,15 +2595,15 @@ ; AVX2-NEXT: vmovq %r15, %xmm13 ; AVX2-NEXT: vmovq %rbx, %xmm14 ; AVX2-NEXT: vmovq %r8, %xmm15 -; AVX2-NEXT: vmovq %r10, %xmm0 -; AVX2-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm1 # 8-byte Folded Reload +; AVX2-NEXT: vmovq %r9, %xmm0 +; AVX2-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Folded Reload ; AVX2-NEXT: # xmm1 = mem[0],zero ; AVX2-NEXT: vmovq %r12, %xmm2 ; AVX2-NEXT: vmovq %r11, %xmm3 ; AVX2-NEXT: vmovq %r14, %xmm4 -; AVX2-NEXT: vmovq %r9, %xmm5 +; AVX2-NEXT: vmovq %r10, %xmm5 ; AVX2-NEXT: vmovq %rdx, %xmm6 -; AVX2-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm7 # 8-byte Folded Reload +; AVX2-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 8-byte Folded Reload ; AVX2-NEXT: # xmm7 = mem[0],zero ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm9[0],xmm8[0] ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm10[0] @@ -2658,58 +2657,58 @@ ; AVX512-NEXT: pushq %rbx ; AVX512-NEXT: subq $24, %rsp ; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX512-NEXT: vpextrq $1, %xmm4, %rbx +; AVX512-NEXT: vmovq %xmm4, %rbp +; AVX512-NEXT: vpextrq $1, %xmm3, %rdi +; AVX512-NEXT: vmovq %xmm3, %rsi ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX512-NEXT: vpextrq $1, %xmm3, %rcx -; AVX512-NEXT: vmovq %xmm3, %rax -; AVX512-NEXT: vpextrq $1, %xmm2, %rbx -; AVX512-NEXT: vmovq %xmm2, %rbp -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX512-NEXT: 
vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vpextrq $1, %xmm2, %rdi -; AVX512-NEXT: vmovq %xmm2, %r8 -; AVX512-NEXT: vpextrq $1, %xmm1, %r13 -; AVX512-NEXT: vmovq %xmm1, %r12 +; AVX512-NEXT: vpextrq $1, %xmm3, %rdx +; AVX512-NEXT: vmovq %xmm3, %r8 +; AVX512-NEXT: vpextrq $1, %xmm2, %r13 +; AVX512-NEXT: vmovq %xmm2, %r12 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vpextrq $1, %xmm2, %r15 -; AVX512-NEXT: vmovq %xmm2, %r14 -; AVX512-NEXT: vpextrq $1, %xmm1, %rdx -; AVX512-NEXT: vmovq %xmm1, %r9 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpextrq $1, %xmm3, %r15 +; AVX512-NEXT: vmovq %xmm3, %r14 +; AVX512-NEXT: vpextrq $1, %xmm2, %r9 +; AVX512-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX512-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX512-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4 -; AVX512-NEXT: vpextrq $1, %xmm4, %rsi -; AVX512-NEXT: addq %rcx, %rsi -; AVX512-NEXT: vmovq %xmm4, %rcx -; AVX512-NEXT: addq %rax, %rcx -; AVX512-NEXT: vpextrq $1, %xmm3, %rax +; AVX512-NEXT: vpextrq $1, %xmm4, %rax ; AVX512-NEXT: addq %rbx, %rax ; AVX512-NEXT: movq %rax, %rbx -; AVX512-NEXT: vmovq %xmm3, %rax +; AVX512-NEXT: vmovq %xmm4, %rax ; AVX512-NEXT: addq %rbp, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: movq %rax, %rbp ; AVX512-NEXT: vpextrq $1, %xmm3, %rax ; AVX512-NEXT: addq %rdi, %rax ; AVX512-NEXT: movq %rax, %rdi +; AVX512-NEXT: vmovq %xmm3, %r10 +; AVX512-NEXT: addq %rsi, %r10 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpextrq $1, %xmm3, %rcx +; AVX512-NEXT: addq %rdx, %rcx ; AVX512-NEXT: vmovq %xmm3, %rax ; AVX512-NEXT: addq %r8, %rax ; AVX512-NEXT: movq %rax, %r8 -; AVX512-NEXT: vpextrq $1, %xmm2, %rbp -; AVX512-NEXT: addq %r13, %rbp +; AVX512-NEXT: vpextrq $1, %xmm2, %rsi +; AVX512-NEXT: addq %r13, %rsi ; AVX512-NEXT: vmovq %xmm2, %r11 ; AVX512-NEXT: addq %r12, %r11 ; AVX512-NEXT: 
vextracti128 $1, %ymm1, %xmm1 @@ -2719,82 +2718,82 @@ ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 ; AVX512-NEXT: vpextrq $1, %xmm3, %rax ; AVX512-NEXT: addq %r15, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vmovq %xmm3, %rax ; AVX512-NEXT: addq %r14, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vpextrq $1, %xmm2, %rax -; AVX512-NEXT: addq %rdx, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX512-NEXT: vmovq %xmm2, %rax ; AVX512-NEXT: addq %r9, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm2, %rax +; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX512-NEXT: vpextrq $1, %xmm2, %rax -; AVX512-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vmovq %xmm2, %r14 -; AVX512-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload ; AVX512-NEXT: vpextrq $1, %xmm0, %rax ; AVX512-NEXT: vpextrq $1, %xmm1, %r9 ; AVX512-NEXT: addq %rax, %r9 ; AVX512-NEXT: vmovq %xmm0, %rax ; AVX512-NEXT: vmovq %xmm1, %rdx ; AVX512-NEXT: addq %rax, %rdx -; AVX512-NEXT: addq $-1, %rsi -; AVX512-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rbx +; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX512-NEXT: addq $-1, %rcx -; AVX512-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rbp +; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; AVX512-NEXT: addq $-1, %rbx -; AVX512-NEXT: movq %rbx, (%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rdi +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill ; AVX512-NEXT: addq $-1, %r10 -; AVX512-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX512-NEXT: addq $-1, %rdi -; AVX512-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rcx +; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte 
Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, %r8 -; AVX512-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX512-NEXT: addq $-1, %rbp -; AVX512-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rsi +; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %r13d ; AVX512-NEXT: adcq $-1, %r13 ; AVX512-NEXT: addq $-1, %r11 -; AVX512-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %r15d ; AVX512-NEXT: adcq $-1, %r15 -; AVX512-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, %rsi -; AVX512-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %r12d ; AVX512-NEXT: adcq $-1, %r12 -; AVX512-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %ebx ; AVX512-NEXT: adcq $-1, %rbx -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; AVX512-NEXT: addq $-1, %rbp ; AVX512-NEXT: movl $0, %r11d ; AVX512-NEXT: adcq $-1, %r11 -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: addq $-1, %rax ; AVX512-NEXT: movl $0, %r10d ; AVX512-NEXT: adcq $-1, %r10 @@ -2808,39 +2807,39 @@ ; AVX512-NEXT: movl $0, %ecx ; AVX512-NEXT: adcq $-1, %rcx ; AVX512-NEXT: shldq $63, %rdx, %rcx -; AVX512-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: shldq $63, %r9, %rdi ; AVX512-NEXT: shldq $63, %r14, %r8 ; AVX512-NEXT: shldq $63, %rax, %r10 ; AVX512-NEXT: shldq $63, %rbp, %r11 -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %rbx -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %r12 -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %rsi -; AVX512-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r15 -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r13 -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; 
AVX512-NEXT: shldq $63, %rax, %rsi -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %rcx -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %rax -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; AVX512-NEXT: movq (%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq (%rsp), %r14 # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %r14 -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %r9 -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload -; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; AVX512-NEXT: shldq $63, %rdx, %rbp ; AVX512-NEXT: vmovq %rbp, %xmm8 ; AVX512-NEXT: vmovq %r9, %xmm9 @@ -2850,7 +2849,7 @@ ; AVX512-NEXT: vmovq %rsi, %xmm13 ; AVX512-NEXT: vmovq %r13, %xmm14 ; AVX512-NEXT: vmovq %r15, %xmm15 -; AVX512-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm0 # 8-byte Folded Reload +; AVX512-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload ; AVX512-NEXT: # xmm0 = mem[0],zero ; AVX512-NEXT: vmovq %r12, %xmm1 ; AVX512-NEXT: vmovq %rbx, %xmm2 @@ -2858,7 +2857,7 @@ ; AVX512-NEXT: vmovq %r10, %xmm4 ; AVX512-NEXT: vmovq %r8, %xmm5 ; AVX512-NEXT: vmovq %rdi, %xmm6 -; AVX512-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm7 # 8-byte Folded Reload +; AVX512-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 8-byte Folded Reload ; AVX512-NEXT: # xmm7 = mem[0],zero ; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm9[0],xmm8[0] ; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm10[0] Index: test/CodeGen/X86/dagcombine-cse.ll =================================================================== --- test/CodeGen/X86/dagcombine-cse.ll +++ test/CodeGen/X86/dagcombine-cse.ll @@ -24,13 +24,13 @@ ; X64-NEXT: imull %ecx, %esi ; X64-NEXT: leal (%rsi,%rdx), %eax ; X64-NEXT: cltq +; X64-NEXT: movl (%rdi,%rax), %eax ; X64-NEXT: leal 4(%rsi,%rdx), %ecx ; X64-NEXT: movslq %ecx, %rcx ; X64-NEXT: movzwl (%rdi,%rcx), %ecx ; X64-NEXT: shlq $32, %rcx -; X64-NEXT: movl (%rdi,%rax), %eax -; X64-NEXT: orq %rcx, %rax -; X64-NEXT: movq %rax, %xmm0 +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: movq %rcx, %xmm0 ; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7] ; X64-NEXT: movd %xmm0, %eax Index: test/CodeGen/X86/fold-zext-trunc-dbginfo.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/fold-zext-trunc-dbginfo.ll @@ -0,0 +1,352 @@ +; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -stop-after livedebugvalues -o - | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.13.0" + +%TSa = type <{ %Ts12_ArrayBufferV }> 
+%Ts12_ArrayBufferV = type <{ %Ts14_BridgeStorageV }> +%Ts14_BridgeStorageV = type <{ %swift.bridge* }> +%swift.bridge = type opaque +%swift.type = type { i64 } +%swift.type_metadata_record = type { i32 } +%Ts6UInt16V = type <{ i16 }> +%Ts16IndexingIteratorVys5RangeVySiGG = type <{ %Ts5RangeVySiG, %TSi }> +%Ts5RangeVySiG = type <{ %TSi, %TSi }> +%TSi = type <{ i64 }> +%TSiSg = type <{ [8 x i8], [1 x i8] }> +%Any = type { [24 x i8], %swift.type* } +%TSS = type <{ %Ts11_StringGutsV }> +%Ts11_StringGutsV = type <{ %Ts13_StringObjectV, %TSu }> +%Ts13_StringObjectV = type <{ %swift.bridge* }> +%TSu = type <{ i64 }> +%swift.metadata_response = type { %swift.type*, i64 } + +@"$S8stepping6zeroesSays6UInt16VGvp" = hidden global %TSa zeroinitializer, align 8, !dbg !0 +@"$SSSN" = external global %swift.type, align 8 +@0 = private unnamed_addr constant [1 x i8] zeroinitializer +@"$SBi64_WV" = external global i8*, align 8 +@1 = private constant [9 x i8] c"stepping\00" +@"$S8steppingMXM" = linkonce_odr hidden constant <{ i32, i32, i32 }> <{ i32 0, i32 0, i32 trunc (i64 sub (i64 ptrtoint ([9 x i8]* @1 to i64), i64 ptrtoint (i32* getelementptr inbounds (<{ i32, i32, i32 }>, <{ i32, i32, i32 }>* @"$S8steppingMXM", i32 0, i32 2) to i64)) to i32) }>, section "__TEXT,__const", align 4 +@2 = private constant [3 x i8] c"Pt\00" +@"$S8stepping2PtVMn" = hidden constant <{ i32, i32, i32, i32, i32, i32 }> <{ i32 262225, i32 trunc (i64 sub (i64 ptrtoint (<{ i32, i32, i32 }>* @"$S8steppingMXM" to i64), i64 ptrtoint (i32* getelementptr inbounds (<{ i32, i32, i32, i32, i32, i32 }>, <{ i32, i32, i32, i32, i32, i32 }>* @"$S8stepping2PtVMn", i32 0, i32 1) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint ([3 x i8]* @2 to i64), i64 ptrtoint (i32* getelementptr inbounds (<{ i32, i32, i32, i32, i32, i32 }>, <{ i32, i32, i32, i32, i32, i32 }>* @"$S8stepping2PtVMn", i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (%swift.metadata_response (i64)* @"$S8stepping2PtVMa" to i64), i64 ptrtoint (i32* getelementptr inbounds (<{ i32, i32, i32, i32, i32, i32 }>, <{ i32, i32, i32, i32, i32, i32 }>* @"$S8stepping2PtVMn", i32 0, i32 3) to i64)) to i32), i32 1, i32 2 }>, section "__TEXT,__const", align 4 +@"$S8stepping2PtVMf" = internal constant <{ i8**, i64, <{ i32, i32, i32, i32, i32, i32 }>*, i32, [4 x i8] }> <{ i8** @"$SBi64_WV", i64 1, <{ i32, i32, i32, i32, i32, i32 }>* @"$S8stepping2PtVMn", i32 0, [4 x i8] zeroinitializer }>, align 8 +@"symbolic \01____ 8stepping2PtV" = linkonce_odr hidden constant <{ [1 x i8], i32, i8 }> <{ [1 x i8] c"\01", i32 trunc (i64 sub (i64 ptrtoint (<{ i32, i32, i32, i32, i32, i32 }>* @"$S8stepping2PtVMn" to i64), i64 ptrtoint (i32* getelementptr inbounds (<{ [1 x i8], i32, i8 }>, <{ [1 x i8], i32, i8 }>* @"symbolic \01____ 8stepping2PtV", i32 0, i32 1) to i64)) to i32), i8 0 }>, section "__TEXT,__swift4_typeref, regular, no_dead_strip", align 1 +@"symbolic Sd" = linkonce_odr hidden constant <{ [2 x i8], i8 }> <{ [2 x i8] c"Sd", i8 0 }>, section "__TEXT,__swift4_typeref, regular, no_dead_strip", align 1 +@3 = private constant [2 x i8] c"x\00", section "__TEXT,__swift4_reflstr, regular, no_dead_strip" +@"$S8stepping2PtVMF" = internal constant { i32, i32, i16, i16, i32, i32, i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (<{ [1 x i8], i32, i8 }>* @"symbolic \01____ 8stepping2PtV" to i64), i64 ptrtoint ({ i32, i32, i16, i16, i32, i32, i32, i32 }* @"$S8stepping2PtVMF" to i64)) to i32), i32 0, i16 0, i16 12, i32 1, i32 0, i32 trunc (i64 sub (i64 ptrtoint (<{ [2 x i8], i8 }>* @"symbolic Sd" to 
i64), i64 ptrtoint (i32* getelementptr inbounds ({ i32, i32, i16, i16, i32, i32, i32, i32 }, { i32, i32, i16, i16, i32, i32, i32, i32 }* @"$S8stepping2PtVMF", i32 0, i32 6) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint ([2 x i8]* @3 to i64), i64 ptrtoint (i32* getelementptr inbounds ({ i32, i32, i16, i16, i32, i32, i32, i32 }, { i32, i32, i16, i16, i32, i32, i32, i32 }* @"$S8stepping2PtVMF", i32 0, i32 7) to i64)) to i32) }, section "__TEXT,__swift4_fieldmd, regular, no_dead_strip", align 4 +@"\01l_type_metadata_table" = private constant [1 x %swift.type_metadata_record] [%swift.type_metadata_record { i32 trunc (i64 sub (i64 ptrtoint (<{ i32, i32, i32, i32, i32, i32 }>* @"$S8stepping2PtVMn" to i64), i64 ptrtoint ([1 x %swift.type_metadata_record]* @"\01l_type_metadata_table" to i64)) to i32) }], section "__TEXT, __swift4_types, regular, no_dead_strip", align 4 +@__swift_reflection_version = linkonce_odr hidden constant i16 3 +@llvm.used = appending global [3 x i8*] [i8* bitcast ({ i32, i32, i16, i16, i32, i32, i32, i32 }* @"$S8stepping2PtVMF" to i8*), i8* bitcast ([1 x %swift.type_metadata_record]* @"\01l_type_metadata_table" to i8*), i8* bitcast (i16* @__swift_reflection_version to i8*)], section "llvm.metadata", align 8 + +@"$S8stepping2PtVN" = hidden alias %swift.type, bitcast (i64* getelementptr inbounds (<{ i8**, i64, <{ i32, i32, i32, i32, i32, i32 }>*, i32, [4 x i8] }>, <{ i8**, i64, <{ i32, i32, i32, i32, i32, i32 }>*, i32, [4 x i8] }>* @"$S8stepping2PtVMf", i32 0, i32 1) to %swift.type*) + +define i32 @main(i32, i8**) !dbg !38 { + %3 = alloca %Ts6UInt16V, align 2 + %"$generator" = alloca %Ts16IndexingIteratorVys5RangeVySiGG, align 8 + %4 = bitcast %Ts16IndexingIteratorVys5RangeVySiGG* %"$generator" to %TSi* + %5 = bitcast %Ts16IndexingIteratorVys5RangeVySiGG* %"$generator" to %Ts6UInt16V* + %6 = alloca %Ts5RangeVySiG, align 8 + %7 = bitcast %Ts5RangeVySiG* %6 to %TSiSg* + %8 = bitcast %Ts5RangeVySiG* %6 to %Ts6UInt16V* + %9 = alloca %TSi, align 8 + %10 = alloca %TSi, align 8 + %11 = alloca %Ts5RangeVySiG, align 8 + %12 = bitcast %Ts5RangeVySiG* %11 to %TSi* + %13 = alloca %TSiSg, align 8 + %14 = alloca %Ts6UInt16V, align 2 + %15 = bitcast %Ts6UInt16V* %5 to i8*, !dbg !44 + %._value6 = bitcast %Ts6UInt16V* %5 to i16* + store i16 0, i16* %._value6, align 2, !dbg !45 + %._value17 = bitcast %Ts6UInt16V* %5 to i16* + %16 = load i16, i16* %._value17, align 2, !dbg !46 + %17 = call swiftcc %swift.bridge* @"$SSa9repeating5countSayxGx_SitcfCs6UInt16V_Tg5"(i16 %16, i64 1), !dbg !46 + %18 = bitcast %Ts6UInt16V* %5 to i8*, !dbg !46 + store %swift.bridge* %17, %swift.bridge** getelementptr inbounds (%TSa, %TSa* @"$S8stepping6zeroesSays6UInt16VGvp", i32 0, i32 0, i32 0, i32 0), align 8, !dbg !46 + %19 = bitcast %Ts16IndexingIteratorVys5RangeVySiGG* %"$generator" to i8*, !dbg !47 + %20 = bitcast %Ts5RangeVySiG* %6 to i8*, !dbg !47 + %21 = bitcast %TSi* %4 to i8*, !dbg !47 + %._value28 = bitcast %TSi* %4 to i64* + store i64 0, i64* %._value28, align 8, !dbg !49 + %22 = bitcast %TSi* %12 to i8*, !dbg !47 + %._value39 = bitcast %TSi* %12 to i64* + store i64 1, i64* %._value39, align 8, !dbg !49 + %._value410 = bitcast %TSi* %4 to i64* + %23 = load i64, i64* %._value410, align 8, !dbg !49 + %._value511 = bitcast %TSi* %12 to i64* + %24 = load i64, i64* %._value511, align 8, !dbg !49 + %25 = call swiftcc { i64, i64 } @"$Ss5RangeV15uncheckedBoundsAByxGx5lower_x5uppert_tcfCSi_Tg5"(i64 %23, i64 %24), !dbg !49 + %26 = extractvalue { i64, i64 } %25, 0, !dbg !49 + %27 = extractvalue { i64, i64 } %25, 
1, !dbg !49 + %.lowerBound12 = bitcast %Ts5RangeVySiG* %6 to %TSi* + %.lowerBound._value13 = bitcast %TSi* %.lowerBound12 to i64* + store i64 %26, i64* %.lowerBound._value13, align 8, !dbg !49 + %.upperBound = getelementptr inbounds %Ts5RangeVySiG, %Ts5RangeVySiG* %6, i32 0, i32 1, !dbg !49 + %.upperBound._value14 = bitcast %TSi* %.upperBound to i64* + store i64 %27, i64* %.upperBound._value14, align 8, !dbg !49 + %28 = bitcast %TSi* %12 to i8*, !dbg !49 + %29 = bitcast %TSi* %4 to i8*, !dbg !49 + %.lowerBound615 = bitcast %Ts5RangeVySiG* %6 to %TSi* + %.lowerBound6._value16 = bitcast %TSi* %.lowerBound615 to i64* + %30 = load i64, i64* %.lowerBound6._value16, align 8, !dbg !49 + %.upperBound7 = getelementptr inbounds %Ts5RangeVySiG, %Ts5RangeVySiG* %6, i32 0, i32 1, !dbg !49 + %.upperBound7._value17 = bitcast %TSi* %.upperBound7 to i64* + %31 = load i64, i64* %.upperBound7._value17, align 8, !dbg !49 + %32 = bitcast %Ts5RangeVySiG* %11 to i8*, !dbg !47 + %.lowerBound818 = bitcast %Ts5RangeVySiG* %11 to %TSi* + %.lowerBound8._value19 = bitcast %TSi* %.lowerBound818 to i64* + store i64 %30, i64* %.lowerBound8._value19, align 8, !dbg !49 + %.upperBound9 = getelementptr inbounds %Ts5RangeVySiG, %Ts5RangeVySiG* %11, i32 0, i32 1, !dbg !49 + %.upperBound9._value20 = bitcast %TSi* %.upperBound9 to i64* + store i64 %31, i64* %.upperBound9._value20, align 8, !dbg !49 + %.lowerBound1021 = bitcast %Ts5RangeVySiG* %11 to %TSi* + %.lowerBound10._value22 = bitcast %TSi* %.lowerBound1021 to i64* + %33 = load i64, i64* %.lowerBound10._value22, align 8, !dbg !50 + %.upperBound11 = getelementptr inbounds %Ts5RangeVySiG, %Ts5RangeVySiG* %11, i32 0, i32 1, !dbg !50 + %.upperBound11._value23 = bitcast %TSi* %.upperBound11 to i64* + %34 = load i64, i64* %.upperBound11._value23, align 8, !dbg !50 + %35 = call swiftcc { i64, i64, i64 } @"$Ss10CollectionPss16IndexingIteratorVyxG0C0RtzrlE04makeC0AEyFs5RangeVySiG_Tg5"(i64 %33, i64 %34), !dbg !50 + %36 = extractvalue { i64, i64, i64 } %35, 0, !dbg !50 + %37 = extractvalue { i64, i64, i64 } %35, 1, !dbg !50 + %38 = extractvalue { i64, i64, i64 } %35, 2, !dbg !50 + %"$generator._elements24" = bitcast %Ts16IndexingIteratorVys5RangeVySiGG* %"$generator" to %Ts5RangeVySiG* + %"$generator._elements.lowerBound25" = bitcast %Ts5RangeVySiG* %"$generator._elements24" to %TSi* + %"$generator._elements.lowerBound._value26" = bitcast %TSi* %"$generator._elements.lowerBound25" to i64* + store i64 %36, i64* %"$generator._elements.lowerBound._value26", align 8, !dbg !50 + %"$generator._elements.upperBound" = getelementptr inbounds %Ts5RangeVySiG, %Ts5RangeVySiG* %"$generator._elements24", i32 0, i32 1, !dbg !50 + %"$generator._elements.upperBound._value27" = bitcast %TSi* %"$generator._elements.upperBound" to i64* + store i64 %37, i64* %"$generator._elements.upperBound._value27", align 8, !dbg !50 + %"$generator._position" = getelementptr inbounds %Ts16IndexingIteratorVys5RangeVySiGG, %Ts16IndexingIteratorVys5RangeVySiGG* %"$generator", i32 0, i32 1, !dbg !50 + %"$generator._position._value28" = bitcast %TSi* %"$generator._position" to i64* + store i64 %38, i64* %"$generator._position._value28", align 8, !dbg !50 + %39 = bitcast %Ts5RangeVySiG* %11 to i8*, !dbg !51 + %40 = bitcast %Ts5RangeVySiG* %6 to i8*, !dbg !51 + br label %41, !dbg !51 + +;