diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5036,6 +5036,17 @@
   return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
 }
 
+static bool MayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT) {
+  if (!MayFoldLoad(Op))
+    return false;
+
+  // We can not replace a wide volatile load with a broadcast-from-memory,
+  // because that would narrow the load, which isn't legal for volatiles.
+  const LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op.getNode());
+  return !Ld->isVolatile() ||
+         Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
+}
+
 static bool MayFoldIntoStore(SDValue Op) {
   return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
 }
@@ -50876,7 +50887,8 @@
 
     // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
     if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
-        (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
+        (Subtarget.hasAVX2() || MayFoldLoadIntoBroadcastFromMem(
+                                    Op0.getOperand(0), VT.getScalarType())))
       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
                                      Op0.getOperand(0),
diff --git a/llvm/test/CodeGen/X86/pr51615.ll b/llvm/test/CodeGen/X86/pr51615.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr51615.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=ALL,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX2
+
+; https://bugs.llvm.org/show_bug.cgi?id=51615
+; We can not replace a wide volatile load with a broadcast-from-memory,
+; because that would narrow the load, which isn't legal for volatiles.
+
+@g0 = external dso_local global <2 x double>, align 16
+define void @volatile_load_2_elts() {
+; AVX-LABEL: volatile_load_2_elts:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps g0(%rip), %xmm0
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
+; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
+; AVX-NEXT:    vmovapd %ymm0, (%rax)
+; AVX-NEXT:    vmovapd %ymm1, (%rax)
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: volatile_load_2_elts:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps g0(%rip), %xmm0
+; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; AVX2-NEXT:    vmovaps %ymm2, (%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %i = load volatile <2 x double>, <2 x double>* @g0, align 16
+  %i1 = shufflevector <2 x double> %i, <2 x double> poison, <4 x i32>
+  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32>
+  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+  ret void
+}
+
+@g1 = external dso_local global <1 x double>, align 16
+define void @volatile_load_1_elt() {
+; ALL-LABEL: volatile_load_1_elt:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd g1(%rip), %ymm0
+; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; ALL-NEXT:    vmovaps %ymm0, (%rax)
+; ALL-NEXT:    vmovaps %ymm2, (%rax)
+; ALL-NEXT:    vzeroupper
+; ALL-NEXT:    retq
+  %i = load volatile <1 x double>, <1 x double>* @g1, align 16
+  %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32>
+  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32>
+  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+  ret void
+}
+
+@g2 = external dso_local global <2 x float>, align 16
+define void @volatile_load_2_elts_bitcast() {
+; ALL-LABEL: volatile_load_2_elts_bitcast:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd g2(%rip), %ymm0
+; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; ALL-NEXT:    vmovaps %ymm0, (%rax)
+; ALL-NEXT:    vmovaps %ymm2, (%rax)
+; ALL-NEXT:    vzeroupper
+; ALL-NEXT:    retq
+  %i0 = load volatile <2 x float>, <2 x float>* @g2, align 16
+  %i = bitcast <2 x float> %i0 to <1 x double>
+  %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32>
+  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32>
+  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+  ret void
+}
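A minimal reading of the CHECK lines above, as a sketch of what the new guard preserves: the 16-byte volatile load from @g0 must stay whole, so the splat is done in registers rather than via a narrowing broadcast-from-memory, while the 8-byte load from @g1 is already element-sized and may still be folded.

; wide volatile load (@g0): full 16-byte access kept, splat in-register (AVX2 output above)
;   vmovaps      g0(%rip), %xmm0
;   vbroadcastsd %xmm0, %ymm0
;
; element-sized volatile load (@g1): broadcast-from-memory keeps the 8-byte access width
;   vbroadcastsd g1(%rip), %ymm0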