diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33435,8 +33435,19 @@
     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
 
+    // Share broadcast with the longest vector and extract low subvector (free).
+    for (SDNode *User : Src->uses())
+      if (User != N.getNode() &&
+          (User->getOpcode() == X86ISD::VBROADCAST ||
+           User->getOpcode() == X86ISD::VBROADCAST_LOAD) &&
+          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
+        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
+                                VT.getSizeInBits());
+      }
+
     // vbroadcast(scalarload X) -> vbroadcast_load X
-    if (!SrcVT.isVector() && Src.hasOneUse() &&
+    // For float loads, extract other uses of the scalar from the broadcast.
+    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
         ISD::isNormalLoad(Src.getNode())) {
       LoadSDNode *LN = cast<LoadSDNode>(Src);
       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
@@ -33444,18 +33455,14 @@
       SDValue BcastLd =
           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
+      if (!Src.hasOneUse())
+        DAG.ReplaceAllUsesOfValueWith(
+            Src, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
+                             DAG.getIntPtrConstant(0, DL)));
       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
       return BcastLd;
     }
 
-    // Share broadcast with the longest vector and extract low subvector (free).
-    for (SDNode *User : Src->uses())
-      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
-          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
-        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
-                                VT.getSizeInBits());
-      }
-
     return SDValue();
   }
   case X86ISD::BLENDI: {
diff --git a/llvm/test/CodeGen/X86/avx-vbroadcast.ll b/llvm/test/CodeGen/X86/avx-vbroadcast.ll
--- a/llvm/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx-vbroadcast.ll
@@ -159,18 +159,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovsd %xmm0, (%eax)
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X32-NEXT:    vmovlps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: C2:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    vmovsd %xmm0, (%rsi)
-; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
+; X64-NEXT:    vmovlps %xmm0, (%rsi)
 ; X64-NEXT:    retq
 entry:
   %q = load double, double* %ptr, align 8
@@ -231,18 +227,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    vbroadcastss (%ecx), %ymm0
 ; X32-NEXT:    vmovss %xmm0, (%eax)
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: D3:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT:    retq
 entry:
   %q = load float, float* %ptr, align 4
@@ -285,16 +277,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    vbroadcastss (%ecx), %xmm0
 ; X32-NEXT:    vmovss %xmm0, (%eax)
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: e2:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
 entry:
   %q = load float, float* %ptr, align 4
@@ -669,16 +659,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovsd %xmm0, (%eax)
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT:    vmovlps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: I2:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    vmovsd %xmm0, (%rsi)
-; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT:    vmovlps %xmm0, (%rsi)
 ; X64-NEXT:    retq
 entry:
   %q = load double, double* %ptr, align 4
@@ -884,7 +872,6 @@
 
 ;
 ; Broadcast scale factor for xyz vector - slp will have vectorized xy.
-; FIXME: Load as a broadcast and then use the scalar 0'th element.
 ;
 define double @broadcast_scale_xyz(double* nocapture readonly, double* nocapture readonly) nounwind {
 ; X32-LABEL: broadcast_scale_xyz:
@@ -892,9 +879,8 @@
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; X32-NEXT:    vmulpd (%eax), %xmm1, %xmm1
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT:    vmulpd (%eax), %xmm0, %xmm1
 ; X32-NEXT:    vmulsd 16(%eax), %xmm0, %xmm0
 ; X32-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
 ; X32-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
@@ -906,9 +892,8 @@
 ;
 ; X64-LABEL: broadcast_scale_xyz:
 ; X64:       ## %bb.0:
-; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; X64-NEXT:    vmulpd (%rsi), %xmm1, %xmm1
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT:    vmulpd (%rsi), %xmm0, %xmm1
 ; X64-NEXT:    vmulsd 16(%rsi), %xmm0, %xmm0
 ; X64-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
 ; X64-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
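
For reference, a minimal IR sketch of the multi-use case the new (Src.hasOneUse() || VT.isFloatingPoint()) path targets, written in the style of the avx-vbroadcast.ll tests above. The function name is hypothetical and the snippet is not part of the patch; the expected assembly in the comments is inferred from the updated C2 checks rather than verified output.

; A scalar double load with two uses: stored back as a scalar and splatted
; to <4 x double>. With this patch the splat should become a vbroadcastsd
; from memory and the store should reuse the broadcast's low element
; (vmovlps), as in the C2 test above, instead of keeping a separate vmovsd.
define <4 x double> @splat_and_store(double* %src, double* %dst) nounwind {
entry:
  %q = load double, double* %src, align 8
  store double %q, double* %dst, align 8
  %v0 = insertelement <4 x double> undef, double %q, i32 0
  %splat = shufflevector <4 x double> %v0, <4 x double> undef, <4 x i32> zeroinitializer
  ret <4 x double> %splat
}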