diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3129,6 +3129,16 @@
     Known = KnownBits::udiv(Known, Known2);
     break;
   }
+  case ISD::AVGCEILU: {
+    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+    Known = Known.zext(BitWidth + 1);
+    Known2 = Known2.zext(BitWidth + 1);
+    KnownBits One = KnownBits::makeConstant(APInt(1, 1));
+    Known = KnownBits::computeForAddCarry(Known, Known2, One);
+    Known = Known.extractBits(BitWidth, 1);
+    break;
+  }
   case ISD::SELECT:
   case ISD::VSELECT:
     Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
diff --git a/llvm/test/CodeGen/X86/combine-pavg.ll b/llvm/test/CodeGen/X86/combine-pavg.ll
--- a/llvm/test/CodeGen/X86/combine-pavg.ll
+++ b/llvm/test/CodeGen/X86/combine-pavg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
 
 declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
 declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
@@ -21,7 +21,6 @@
   ret <16 x i8> %1
 }
 
-; TODO: Failure to remove masks as we know the upper bits are zero
 define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; SSE-LABEL: combine_pavgw_knownbits:
 ; SSE:       # %bb.0:
@@ -32,41 +31,20 @@
 ; SSE-NEXT:    pand %xmm4, %xmm2
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pavgw %xmm2, %xmm3
-; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE-NEXT:    pand %xmm1, %xmm3
-; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    packuswb %xmm3, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: combine_pavgw_knownbits:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
-; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm2
-; AVX1-NEXT:    vpavgw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: combine_pavgw_knownbits:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
-; AVX2-NEXT:    vpand %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX2-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpand %xmm4, %xmm2, %xmm1
-; AVX2-NEXT:    vpand %xmm4, %xmm3, %xmm2
-; AVX2-NEXT:    vpavgw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: combine_pavgw_knownbits:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
+; AVX-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm4, %xmm2, %xmm1
+; AVX-NEXT:    vpand %xmm4, %xmm3, %xmm2
+; AVX-NEXT:    vpavgw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %m0 = and <8 x i16> %a0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
   %m1 = and <8 x i16> %a1, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
   %m2 = and <8 x i16> %a2, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
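
Not part of the patch, just an illustrative note: the new computeKnownBits case models the identity avgceilu(x, y) = (zext(x) + zext(y) + 1) >> 1, evaluated in BitWidth+1 bits via KnownBits::computeForAddCarry and then narrowed with extractBits(BitWidth, 1). The standalone C++ sketch below (the helper name avgceilu16 is made up for the example, not LLVM code) checks that when both PAVGW operands are masked to 5 bits, as in combine_pavgw_knownbits, every result lane still fits in 5 bits, so the upper bits are known zero and the 255-masks ahead of packuswb become redundant.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Ceiling average of two unsigned 16-bit lanes, i.e. what PAVGW / ISD::AVGCEILU
// computes: widen to 32 bits so x + y + 1 cannot wrap, then shift right by one.
static uint16_t avgceilu16(uint16_t X, uint16_t Y) {
  return static_cast<uint16_t>((uint32_t(X) + uint32_t(Y) + 1u) >> 1);
}

int main() {
  // Both inputs are at most 31 (5 bits), so the result is at most
  // (31 + 31 + 1) >> 1 = 31 and bits 5..15 are always zero -- the same fact
  // the new known-bits case derives for the DAG node.
  for (uint16_t X = 0; X <= 31; ++X)
    for (uint16_t Y = 0; Y <= 31; ++Y)
      assert((avgceilu16(X, Y) & ~uint16_t(31)) == 0);
  std::puts("upper bits of avgceilu are zero for 5-bit inputs");
  return 0;
}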