llvm/test/CodeGen/X86/sad.ll
bb:
  %tmp25 = add <16 x i32> %tmp23, %tmp24
  %tmp26 = shufflevector <16 x i32> %tmp25, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %tmp27 = add <16 x i32> %tmp25, %tmp26
  %tmp28 = extractelement <16 x i32> %tmp27, i64 0
  ret i32 %tmp28
}
; This test contains two absolute difference patterns joined by an add. The result of that add is then reduced to a single element.
; SelectionDAGBuilder should tag the joining add as a vector reduction. We need to recognize that both sides can use psadbw.
craig.topper: This part about SelectionDAGBuilder I think refers to how we handled these cases before X86PartialReduction.cpp was added.
define dso_local i32 @sad_double_reduction_abs(<16 x i8>* %arg, <16 x i8>* %arg1, <16 x i8>* %arg2, <16 x i8>* %arg3) {
; SSE2-LABEL: sad_double_reduction_abs:
; SSE2: # %bb.0: # %bb
-; SSE2-NEXT: movdqu (%rdi), %xmm11
-; SSE2-NEXT: movdqu (%rsi), %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm11, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm10, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm11, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: psubd %xmm6, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: psubd %xmm6, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: psubd %xmm5, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: psubd %xmm2, %xmm11
-; SSE2-NEXT: movdqa %xmm8, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm8
-; SSE2-NEXT: psubd %xmm2, %xmm8
-; SSE2-NEXT: movdqa %xmm9, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm9
-; SSE2-NEXT: psubd %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm10
-; SSE2-NEXT: psubd %xmm2, %xmm10
-; SSE2-NEXT: movdqa %xmm11, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm11
-; SSE2-NEXT: psubd %xmm2, %xmm11
-; SSE2-NEXT: movdqu (%rdx), %xmm5
-; SSE2-NEXT: movdqu (%rcx), %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: psubd %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: psubd %xmm3, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: psubd %xmm1, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: psubd %xmm0, %xmm5
-; SSE2-NEXT: movdqa %xmm6, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pxor %xmm0, %xmm6
-; SSE2-NEXT: psubd %xmm0, %xmm6
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pxor %xmm0, %xmm7
-; SSE2-NEXT: psubd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm9, %xmm7
-; SSE2-NEXT: paddd %xmm8, %xmm7
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: psubd %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm5, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pxor %xmm0, %xmm5
-; SSE2-NEXT: psubd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm11, %xmm5
-; SSE2-NEXT: paddd %xmm10, %xmm5
-; SSE2-NEXT: paddd %xmm7, %xmm5
-; SSE2-NEXT: paddd %xmm2, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE2-NEXT: paddd %xmm5, %xmm0
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: movdqu (%rsi), %xmm1
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movdqu (%rdx), %xmm0
+; SSE2-NEXT: movdqu (%rcx), %xmm2
+; SSE2-NEXT: psadbw %xmm0, %xmm2
+; SSE2-NEXT: paddd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; AVX1-LABEL: sad_double_reduction_abs:
; AVX1: # %bb.0: # %bb
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpabsd %xmm0, %xmm0
-; AVX1-NEXT: vpabsd %xmm1, %xmm1
-; AVX1-NEXT: vpabsd %xmm2, %xmm8
-; AVX1-NEXT: vpabsd %xmm3, %xmm3
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vpabsd %xmm2, %xmm2
-; AVX1-NEXT: vpabsd %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpabsd %xmm5, %xmm1
-; AVX1-NEXT: vpabsd %xmm6, %xmm2
-; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm8, %xmm2
-; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu (%rdx), %xmm1
+; AVX1-NEXT: vpsadbw (%rcx), %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: sad_double_reduction_abs:
; AVX2: # %bb.0: # %bb
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpabsd %ymm0, %ymm0
-; AVX2-NEXT: vpabsd %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpabsd %ymm2, %ymm2
-; AVX2-NEXT: vpabsd %ymm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu (%rdx), %xmm1
+; AVX2-NEXT: vpsadbw (%rcx), %xmm1, %xmm1
+; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: sad_double_reduction_abs:
; AVX512: # %bb.0: # %bb
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpabsd %zmm0, %zmm0
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512-NEXT: vpsubd %zmm2, %zmm1, %zmm1
-; AVX512-NEXT: vpabsd %zmm1, %zmm1
-; AVX512-NEXT: vpaddd %zmm0, %zmm1, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu (%rdi), %xmm0
+; AVX512-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu (%rdx), %xmm1
+; AVX512-NEXT: vpsadbw (%rcx), %xmm1, %xmm1
+; AVX512-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
bb:
  %tmp = load <16 x i8>, <16 x i8>* %arg, align 1
  %tmp4 = load <16 x i8>, <16 x i8>* %arg1, align 1
  %tmp5 = zext <16 x i8> %tmp to <16 x i32>
  %tmp6 = zext <16 x i8> %tmp4 to <16 x i32>
  %tmp7 = sub nsw <16 x i32> %tmp5, %tmp6
  %tmp10 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %tmp7, i1 false)
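The rest of the function, its shufflevector/add reduction tail, is elided in the view above. For reference, the sketch below is a hypothetical stand-alone reduction of one absolute-difference side of the test; it is not part of this patch, the names are invented, and it uses the llvm.vector.reduce.add intrinsic in place of the test's explicit shuffle ladder. Run through llc for an SSE2-capable x86-64 target, the zext/sub/abs/reduce chain should collapse to a single psadbw plus a short reduction tail, which is exactly the recognition the new check lines verify for both sides at once.

; Hypothetical reduced example: one absolute-difference pattern feeding a
; full vector reduction. The SAD combine should select one psadbw here.
declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)

define i32 @sad_single_pattern(<16 x i8>* %a, <16 x i8>* %b) {
bb:
  %va = load <16 x i8>, <16 x i8>* %a, align 1
  %vb = load <16 x i8>, <16 x i8>* %b, align 1
  %za = zext <16 x i8> %va to <16 x i32>
  %zb = zext <16 x i8> %vb to <16 x i32>
  %diff = sub nsw <16 x i32> %za, %zb
  %ad = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %diff, i1 false)
  %sum = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ad)
  ret i32 %sum
}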
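One detail in the new check lines worth calling out: in the SSE2, AVX1, and AVX2 output the last step of the reduction tail is por/vpor rather than paddd/vpaddd. Each psadbw result holds two 64-bit lanes, each the sum of eight byte differences, so every lane fits in 16 bits and the upper half of each lane is known zero; after the [2,3,2,3] shuffle-and-add, the element that the final [1,1,1,1] step folds in is known to be zero, and an add that can never carry may be emitted as an or. A hypothetical IR-level illustration of that known-bits rewrite (the function and constants are mine, not from the patch):

; add and or agree whenever the operands have no common set bits, since
; no carries can propagate; the DAG combiner uses known bits to do this.
define i32 @add_as_or(i32 %x) {
bb:
  %lo = and i32 %x, 65535      ; only the low 16 bits may be set
  %hi = and i32 %x, -65536     ; only the high 16 bits may be set
  %r = or i32 %lo, %hi         ; equivalent to add i32 %lo, %hi
  ret i32 %r
}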