@@ -10,7 +10,7 @@ define <16 x i64> @pluto(<16 x i64> %arg, <16 x i64> %arg1, <16 x i64> %arg2, <1
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
- ; CHECK-NEXT: subq $128, %rsp
+ ; CHECK-NEXT: subq $288, %rsp # imm = 0x120
; CHECK-NEXT: vmovaps 240(%rbp), %ymm8
; CHECK-NEXT: vmovaps 208(%rbp), %ymm9
; CHECK-NEXT: vmovaps 176(%rbp), %ymm10
@@ -23,39 +23,42 @@ define <16 x i64> @pluto(<16 x i64> %arg, <16 x i64> %arg1, <16 x i64> %arg2, <1
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,18446744071562067968,18446744071562067968]
; CHECK-NEXT: vblendvpd %ymm0, %ymm2, %ymm6, %ymm0
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
- ; CHECK-NEXT: vblendvpd %ymm2, %ymm3, %ymm7, %ymm3
- ; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm5, %ymm1
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0,1],ymm2[2,3,4,5],ymm14[6,7]
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0,1],ymm11[2,3,4,5,6,7]
- ; CHECK-NEXT: vblendvpd %ymm2, %ymm10, %ymm6, %ymm6
- ; CHECK-NEXT: vmovaps {{.*#+}} ymm10 = [18446744071562067968,18446744071562067968,0,0]
- ; CHECK-NEXT: vblendvpd %ymm10, %ymm9, %ymm5, %ymm5
- ; CHECK-NEXT: vblendvpd %ymm2, %ymm8, %ymm2, %ymm2
- ; CHECK-NEXT: vpshufd {{.*#+}} ymm8 = ymm1[0,1,0,1,4,5,4,5]
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm9 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
- ; CHECK-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,1,3]
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3,4,5,6,7]
+ ; CHECK-NEXT: vmovaps {{.*#+}} ymm8 = [18446744071562067968,18446744071562067968,0,0]
+ ; CHECK-NEXT: vblendvpd %ymm8, %ymm9, %ymm6, %ymm6
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1],ymm11[2,3,4,5,6,7]
+ ; CHECK-NEXT: vpalignr {{.*#+}} ymm8 = ymm0[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
+ ; CHECK-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,0]
+ ; CHECK-NEXT: vmovaps %xmm6, %xmm9
+ ; CHECK-NEXT: # implicit-def: %ymm11
+ ; CHECK-NEXT: vinserti128 $1, %xmm9, %ymm11, %ymm11
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
; CHECK-NEXT: vmovaps %xmm0, %xmm9
- ; CHECK-NEXT: # implicit-def: %ymm10
- ; CHECK-NEXT: vinserti128 $1, %xmm9, %ymm10, %ymm10
- ; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
- ; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
- ; CHECK-NEXT: vpalignr {{.*#+}} ymm1 = ymm5[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
- ; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,3]
- ; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm6[0],ymm3[0],ymm6[2],ymm3[2]
- ; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
- ; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
- ; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,0]
- ; CHECK-NEXT: vmovaps %xmm5, %xmm9
- ; CHECK-NEXT: # implicit-def: %ymm1
- ; CHECK-NEXT: vinserti128 $1, %xmm9, %ymm1, %ymm1
- ; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
- ; CHECK-NEXT: vmovaps %ymm8, %ymm1
+ ; CHECK-NEXT: # implicit-def: %ymm0
+ ; CHECK-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+ ; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm7[0],ymm2[0],ymm7[2],ymm2[2]
+ ; CHECK-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3],ymm7[4,5,6,7]
+ ; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,1,3]
+ ; CHECK-NEXT: vpshufd {{.*#+}} ymm11 = ymm5[0,1,0,1,4,5,4,5]
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6,7]
+ ; CHECK-NEXT: vpalignr {{.*#+}} ymm5 = ymm6[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+ ; CHECK-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,3]
+ ; CHECK-NEXT: vpslldq {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,ymm7[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[16,17,18,19,20,21,22,23]
+ ; CHECK-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+ ; CHECK-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+ ; CHECK-NEXT: vmovaps %ymm8, %ymm0
+ ; CHECK-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+ ; CHECK-NEXT: vmovaps %ymm2, %ymm1
+ ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm2 # 32-byte Reload
+ ; CHECK-NEXT: vmovaps %ymm3, {{[0-9]+}}(%rsp) # 32-byte Spill
+ ; CHECK-NEXT: vmovaps %ymm5, %ymm3
+ ; CHECK-NEXT: vmovaps %ymm10, {{[0-9]+}}(%rsp) # 32-byte Spill
+ ; CHECK-NEXT: vmovaps %ymm12, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) # 32-byte Spill
- ; CHECK-NEXT: vmovaps %ymm12, (%rsp) # 32-byte Spill
+ ; CHECK-NEXT: vmovaps %ymm14, (%rsp) # 32-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq