
Commit 52d73fc

Author: Guy Blank
Committed: Jun 21, 2017
Parent: eac01d4

[DAGCombiner] Add another combine from build vector to shuffle

Add support for combining a build vector into a shuffle when the build vector is made of elements extracted from two vectors (vec1, vec2), where vec2 is half the width of vec1 (and vec1 already matches the width of the result).

llvm-svn: 305883
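For illustration, the pattern this combine targets looks like the following hypothetical IR (invented function name and types, not a test case from this commit): a build vector, i.e. an insertelement chain, whose elements are all extracted from a full-width %vec1 and a half-width %vec2.

; Hypothetical input pattern: the result and %vec1 are both <4 x i32>,
; while %vec2 is half that width at <2 x i32>.
define <4 x i32> @interleave(<4 x i32> %vec1, <2 x i32> %vec2) {
  %x0 = extractelement <4 x i32> %vec1, i32 0
  %y0 = extractelement <2 x i32> %vec2, i32 0
  %x1 = extractelement <4 x i32> %vec1, i32 1
  %y1 = extractelement <2 x i32> %vec2, i32 1
  ; This insertelement chain becomes a single BUILD_VECTOR node
  ; during SelectionDAG construction.
  %v0 = insertelement <4 x i32> undef, i32 %x0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %y0, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %x1, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %y1, i32 3
  ret <4 x i32> %v3
}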

3 files changed: +18, -37 lines

 

llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (+5)
@@ -14055,6 +14055,11 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
       // when we start sorting the vectors by type.
       return SDValue();
     }
+  } else if (InVT2.getSizeInBits() * 2 == VT.getSizeInBits() &&
+             InVT1.getSizeInBits() == VT.getSizeInBits()) {
+    SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
+    ConcatOps[0] = VecIn2;
+    VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
   } else {
     // TODO: Support cases where the length mismatch isn't exactly by a
     // factor of 2.
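The new else-if branch fires when VecIn2 is half the width of the result type and VecIn1 already matches it: VecIn2 is widened by a CONCAT_VECTORS whose second operand is undef, after which both inputs have the result type and the existing shuffle-creation path applies. At the IR level the net effect is roughly this hand-written sketch (hypothetical names, continuing the example above; not output produced by the commit):

define <4 x i32> @interleave_shuffled(<4 x i32> %vec1, <2 x i32> %vec2) {
  ; Widen the half-width input with undef upper elements; this is the
  ; IR analogue of the CONCAT_VECTORS node built from DAG.getUNDEF(InVT2).
  %wide = shufflevector <2 x i32> %vec2, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  ; A single two-input shuffle then replaces the extract/insert chain.
  %r = shufflevector <4 x i32> %vec1, <4 x i32> %wide, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %r
}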

llvm/test/CodeGen/AArch64/arm64-neon-copy.ll (+1, -1)
@@ -1378,7 +1378,7 @@ entry:
 
 define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
 entry:
   %vecext = extractelement <2 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0

llvm/test/CodeGen/X86/vector-shuffle-v48.ll (+12, -36)
@@ -3,42 +3,18 @@
 define <32 x i8> @foo(<48 x i8>* %x0, <16 x i32> %x1, <16 x i32> %x2) {
 ; CHECK-LABEL: foo:
 ; CHECK: # BB#0:
-; CHECK-NEXT: vmovdqu 32(%rdi), %xmm0
-; CHECK-NEXT: vmovdqu (%rdi), %ymm1
-; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
-; CHECK-NEXT: vpextrb $0, %xmm2, %eax
-; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,3,4,6,7,9,10,12,13,15],zero,zero,zero,zero,zero
-; CHECK-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $2, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $3, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $5, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $6, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $1, %xmm0, %eax
-; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,11,12,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $2, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $4, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $5, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $7, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $8, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $10, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $11, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $13, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $14, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: vmovdqu (%rdi), %ymm0
+; CHECK-NEXT: vmovdqu 32(%rdi), %xmm1
+; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,1,2,4,5,7,8,10,11,13,14]
+; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
+; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,2,3,5,6]
+; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,3,4,6,7,9,10,12,13,15],zero,zero,zero,zero,zero
+; CHECK-NEXT: vpor %xmm3, %xmm0, %xmm0
+; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,11,12,14,15,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0]
+; CHECK-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT: retq
   %1 = load <48 x i8>, <48 x i8>* %x0, align 1
   %2 = shufflevector <48 x i8> %1, <48 x i8> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 4, i32 6, i32 7, i32 9, i32 10, i32 12, i32 13, i32 15, i32 16, i32 18, i32 19, i32 21, i32 22, i32 24, i32 25, i32 27, i32 28, i32 30, i32 31, i32 33, i32 34, i32 36, i32 37, i32 39, i32 40, i32 42, i32 43, i32 45, i32 46>
