diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -42951,7 +42951,7 @@
 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
-  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
+  auto *Mld = cast<MaskedLoadSDNode>(N);
 
   // TODO: Expanding load with constant mask may be optimized as well.
   if (Mld->isExpandingLoad())
@@ -42960,12 +42960,33 @@
   if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
     if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
       return ScalarLoad;
+
     // TODO: Do some AVX512 subsets benefit from this transform?
     if (!Subtarget.hasAVX512())
       if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
         return Blend;
   }
 
+  // If the mask value has been legalized to a non-boolean vector, try to
+  // simplify ops leading up to it. We only demand the MSB of each lane.
+  SDValue Mask = Mld->getMask();
+  if (Mask.getScalarValueSizeInBits() != 1) {
+    EVT VT = Mld->getValueType(0);
+    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+    APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
+    if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
+      if (N->getOpcode() != ISD::DELETED_NODE)
+        DCI.AddToWorklist(N);
+      return SDValue(N, 0);
+    }
+    if (SDValue NewMask =
+            TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
+      return DAG.getMaskedLoad(
+          VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
+          NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
+          Mld->getAddressingMode(), Mld->getExtensionType());
+  }
+
   return SDValue();
 }
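Aside (illustrative, not part of the patch): for a masked load with 32-bit lanes, the APInt built above from APInt::getSignMask() demands only bit 31 of each mask element, which is exactly the bit the AVX mask-move instructions test. A minimal standalone sketch of that demanded-bits value, assuming LLVM's ADT headers are available; the file name is hypothetical:

// demanded_bits_sketch.cpp: what combineMaskedLoad demands of each 32-bit
// mask lane -- only the sign bit.
#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  // Same construction as in the patch for a load with 32-bit elements.
  llvm::APInt DemandedBits(llvm::APInt::getSignMask(32));
  assert(DemandedBits.getBitWidth() == 32);
  assert(DemandedBits == llvm::APInt(32, 0x80000000ULL));
  assert(DemandedBits.isSignMask());
  // With only the sign bit demanded, (sra (shl X, 31), 31) is equivalent to
  // (shl X, 31), which is why the vpsrad $31 instructions vanish in the
  // regenerated tests below.
  return 0;
}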
diff --git a/llvm/test/CodeGen/X86/masked_load.ll b/llvm/test/CodeGen/X86/masked_load.ll
--- a/llvm/test/CodeGen/X86/masked_load.ll
+++ b/llvm/test/CodeGen/X86/masked_load.ll
@@ -1163,10 +1163,8 @@
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -1175,7 +1173,6 @@
 ; AVX2: ## %bb.0:
 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
 ; AVX2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
@@ -2416,10 +2413,8 @@
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2
 ; AVX1-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
@@ -2429,7 +2424,6 @@
 ; AVX2: ## %bb.0:
 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
 ; AVX2-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2
 ; AVX2-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
 ; AVX2-NEXT: retq
@@ -2612,10 +2606,8 @@
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -2624,7 +2616,6 @@
 ; AVX2: ## %bb.0:
 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
 ; AVX2-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/pr45563-2.ll b/llvm/test/CodeGen/X86/pr45563-2.ll
--- a/llvm/test/CodeGen/X86/pr45563-2.ll
+++ b/llvm/test/CodeGen/X86/pr45563-2.ll
@@ -28,7 +28,6 @@
 ; CHECK-NEXT: vpinsrw $3, %r8d, %xmm2, %xmm2
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrw $4, %r9d, %xmm2, %xmm2
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
@@ -38,14 +37,12 @@
 ; CHECK-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm2
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
-; CHECK-NEXT: vpsrad $31, %xmm2, %xmm2
 ; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; CHECK-NEXT: vmaskmovps (%rdi), %ymm2, %ymm3
 ; CHECK-NEXT: vblendvps %ymm2, %ymm3, %ymm0, %ymm0
 ; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vmovd %ecx, %xmm2
 ; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
-; CHECK-NEXT: vpsrad $31, %xmm2, %xmm2
 ; CHECK-NEXT: vmaskmovps 32(%rdi), %ymm2, %ymm3
 ; CHECK-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
 ; CHECK-NEXT: vmovss %xmm1, 32(%rax)
@@ -79,7 +76,6 @@
 ; CHECK-NEXT: vpinsrw $3, %r8d, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrw $4, %r9d, %xmm3, %xmm3
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
@@ -89,7 +85,6 @@
 ; CHECK-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps (%rdi), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm2, %ymm2
@@ -103,12 +98,10 @@
 ; CHECK-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm5
 ; CHECK-NEXT: vmaskmovps 32(%rdi), %ymm5, %ymm5
 ; CHECK-NEXT: vblendvps %xmm4, %xmm5, %xmm1, %xmm1
@@ -147,7 +140,6 @@
 ; CHECK-NEXT: vpinsrw $3, %r8d, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrw $4, %r9d, %xmm3, %xmm3
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
@@ -157,7 +149,6 @@
 ; CHECK-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps (%rdi), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm2, %ymm2
@@ -171,14 +162,12 @@
 ; CHECK-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm5
 ; CHECK-NEXT: vmaskmovps 32(%rdi), %ymm5, %ymm5
 ; CHECK-NEXT: vextractf128 $1, %ymm5, %xmm6
@@ -222,14 +211,12 @@
 ; CHECK-NEXT: vpinsrb $6, %r8d, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $8, %r9d, %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps (%r10), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm2, %ymm2
@@ -239,21 +226,18 @@
 ; CHECK-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps 32(%r10), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm1, %ymm1
 ; CHECK-NEXT: vmovd %edi, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vmaskmovps 64(%r10), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %xmm3, %xmm4, %xmm0, %xmm0
 ; CHECK-NEXT: vmovss %xmm0, 64(%rax)
@@ -300,14 +284,12 @@
 ; CHECK-NEXT: vpinsrb $6, %r8d, %xmm4, %xmm4
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm5, %xmm5
-; CHECK-NEXT: vpsrad $31, %xmm5, %xmm5
 ; CHECK-NEXT: vpinsrb $8, %r9d, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
 ; CHECK-NEXT: vmaskmovps (%r10), %ymm4, %ymm5
 ; CHECK-NEXT: vblendvps %ymm4, %ymm5, %ymm3, %ymm3
@@ -317,14 +299,12 @@
 ; CHECK-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm5, %xmm5
-; CHECK-NEXT: vpsrad $31, %xmm5, %xmm5
 ; CHECK-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
 ; CHECK-NEXT: vmaskmovps 32(%r10), %ymm4, %ymm5
 ; CHECK-NEXT: vblendvps %ymm4, %ymm5, %ymm2, %ymm2
@@ -334,13 +314,11 @@
 ; CHECK-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm5, %xmm5
-; CHECK-NEXT: vpsrad $31, %xmm5, %xmm5
 ; CHECK-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm6
 ; CHECK-NEXT: vmaskmovps 64(%r10), %ymm6, %ymm6
 ; CHECK-NEXT: vmovaps %ymm2, 32(%rax)
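The only change in the regenerated checks above is the removal of the vpsrad $31 sign-extension: vmaskmovps/vpmaskmovd select lanes using only the most significant bit of each 32-bit mask element, so sign-extending the shifted mask never changed which lanes were loaded. A small standalone sketch with AVX2 intrinsics, illustrative only (the file name and build line are assumptions, e.g. clang++ -O2 -mavx2 maskload_msb.cpp):

// maskload_msb.cpp: a masked load with an MSB-only mask and one with a fully
// sign-extended mask read exactly the same lanes.
#include <immintrin.h>
#include <cstdio>

int main() {
  float data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  // Boolean-style mask values; only bit 0 is meaningful.
  __m256i bits = _mm256_setr_epi32(1, 0, 1, 0, 0, 1, 0, 1);
  // Move the boolean into the sign bit (the vpslld $31 in the tests).
  __m256i msb_only = _mm256_slli_epi32(bits, 31);
  // Broadcast the sign bit to all bits (the vpsrad $31 the patch removes).
  __m256i all_bits = _mm256_srai_epi32(msb_only, 31);

  float a[8], b[8];
  _mm256_storeu_ps(a, _mm256_maskload_ps(data, msb_only));
  _mm256_storeu_ps(b, _mm256_maskload_ps(data, all_bits));
  for (int i = 0; i != 8; ++i)
    std::printf("lane %d: %g %g\n", i, a[i], b[i]); // columns are identical
  return 0;
}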