diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -50993,7 +50993,11 @@
   if (IsSplat &&
       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
     // If this broadcast is inserted into both halves, use a larger broadcast.
-    if (Op0.getOpcode() == X86ISD::VBROADCAST)
+    // Before AVX2, we don't have vbroadcast instruction(128->256), we prohibit
+    // creating such vbroadcast.
+    if (Op0.getOpcode() == X86ISD::VBROADCAST &&
+        !(!Subtarget.hasAVX2() && VT.is256BitVector() &&
+          Op0.getOperand(0).getValueType().is128BitVector()))
       return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
 
     // If this scalar/subvector broadcast_load is inserted into both halves, use
diff --git a/llvm/test/CodeGen/X86/combine-concatvectors.ll b/llvm/test/CodeGen/X86/combine-concatvectors.ll
--- a/llvm/test/CodeGen/X86/combine-concatvectors.ll
+++ b/llvm/test/CodeGen/X86/combine-concatvectors.ll
@@ -37,3 +37,38 @@
   %extr1 = extractelement <2 x float> %bc, i64 0
   unreachable
 }
+
+@qa_ = external unnamed_addr global [49216 x i8], align 32
+
+define void @prohibit_combine_concat_into_vbroadcast() {
+; CHECK-LABEL: prohibit_combine_concat_into_vbroadcast:
+; CHECK:       # %bb.0: # %alloca_0
+; CHECK-NEXT:    movq qa_@GOTPCREL(%rip), %rax
+; CHECK-NEXT:    movl $1091567616, 30256(%rax) # imm = 0x41100000
+; CHECK-NEXT:    movabsq $4294967297, %rcx # imm = 0x100000001
+; CHECK-NEXT:    movq %rcx, 46348(%rax)
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = <1.0E+0,1.0E+0,u,u>
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; CHECK-NEXT:    vmovups %ymm1, 48296(%rax)
+; CHECK-NEXT:    vmovlps %xmm0, 47372(%rax)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+alloca_0:
+  store float 9.000000e+00, float* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 30256) to float*), align 16
+  store <2 x i32> <i32 1, i32 1>, <2 x i32>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 46348) to <2 x i32>*), align 4
+  br label %loop.4942
+
+loop.4942:                                        ; preds = %loop.4942, %alloca_0
+  br i1 undef, label %loop.4942, label %ifmerge.1298
+
+ifmerge.1298:                                     ; preds = %loop.4942
+  %gepload4638 = load float, float* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 28324) to float*), align 4
+  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 48296) to <2 x float>*), align 8
+  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 48304) to <2 x float>*), align 16
+  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 48312) to <2 x float>*), align 8
+  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 48320) to <2 x float>*), align 32
+  store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 47372) to <2 x float>*), align 4
+  ret void
+}
+