diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23857,7 +23857,8 @@
       for (unsigned i = 0; i != NumElts; ++i) {
         SDValue CurrentOp = SrcOp->getOperand(i);
         if (CurrentOp->isUndef()) {
-          Elts.push_back(CurrentOp);
+          // Must produce 0s in the correct bits.
+          Elts.push_back(DAG.getConstant(0, dl, ElementType));
           continue;
         }
         auto *ND = cast<ConstantSDNode>(CurrentOp);
@@ -23869,7 +23870,8 @@
       for (unsigned i = 0; i != NumElts; ++i) {
         SDValue CurrentOp = SrcOp->getOperand(i);
         if (CurrentOp->isUndef()) {
-          Elts.push_back(CurrentOp);
+          // Must produce 0s in the correct bits.
+          Elts.push_back(DAG.getConstant(0, dl, ElementType));
           continue;
         }
         auto *ND = cast<ConstantSDNode>(CurrentOp);
@@ -23881,7 +23883,8 @@
       for (unsigned i = 0; i != NumElts; ++i) {
         SDValue CurrentOp = SrcOp->getOperand(i);
         if (CurrentOp->isUndef()) {
-          Elts.push_back(CurrentOp);
+          // All shifted in bits must be the same so use 0.
+          Elts.push_back(DAG.getConstant(0, dl, ElementType));
           continue;
         }
         auto *ND = cast<ConstantSDNode>(CurrentOp);
diff --git a/llvm/test/CodeGen/X86/vec_shift5.ll b/llvm/test/CodeGen/X86/vec_shift5.ll
--- a/llvm/test/CodeGen/X86/vec_shift5.ll
+++ b/llvm/test/CodeGen/X86/vec_shift5.ll
@@ -121,12 +121,12 @@
 define <8 x i16> @test9() {
 ; X32-LABEL: test9:
 ; X32:       # %bb.0:
-; X32-NEXT:    movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test9:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
 ; X64-NEXT:    retq
   %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
   ret <8 x i16> %1
@@ -135,12 +135,12 @@
 define <4 x i32> @test10() {
 ; X32-LABEL: test10:
 ; X32:       # %bb.0:
-; X32-NEXT:    movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [0,1,0,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test10:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [0,1,0,4]
 ; X64-NEXT:    retq
   %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
   ret <4 x i32> %1
@@ -154,7 +154,7 @@
 ;
 ; X64-LABEL: test11:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,3,0,0,0,0,0,0,0>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0]
 ; X64-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
   ret <2 x i64> %1
@@ -163,12 +163,12 @@
 define <8 x i16> @test12() {
 ; X32-LABEL: test12:
 ; X32:       # %bb.0:
-; X32-NEXT:    movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test12:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
 ; X64-NEXT:    retq
   %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
   ret <8 x i16> %1
@@ -177,12 +177,12 @@
 define <4 x i32> @test13() {
 ; X32-LABEL: test13:
 ; X32:       # %bb.0:
-; X32-NEXT:    movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [0,1,0,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test13:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [0,1,0,4]
 ; X64-NEXT:    retq
   %1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
   ret <4 x i32> %1
@@ -191,12 +191,12 @@
 define <8 x i16> @test14() {
 ; X32-LABEL: test14:
 ; X32:       # %bb.0:
-; X32-NEXT:    movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test14:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
 ; X64-NEXT:    retq
   %1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
   ret <8 x i16> %1
@@ -205,12 +205,12 @@
 define <4 x i32> @test15() {
 ; X32-LABEL: test15:
 ; X32:       # %bb.0:
-; X32-NEXT:    movaps {{.*#+}} xmm0 = <u,64,u,256>
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [0,64,0,256]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test15:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <u,64,u,256>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [0,64,0,256]
 ; X64-NEXT:    retq
   %1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
   ret <4 x i32> %1
@@ -224,7 +224,7 @@
 ;
 ; X64-LABEL: test16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,248,0,0,0,0,0,0,0>
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,248,0,0,0,0,0,0,0]
 ; X64-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
   ret <2 x i64> %1
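
Reviewer note (illustration only, not part of the patch): the old fold pushed an undef element straight through the shift, but PSLLI/PSRLI fill the shifted-in bits with zeros and PSRAI fills them with copies of the sign bit, so the result of shifting even an undef lane has known bits; keeping the lane undef lets later passes materialize values that contradict those known bits. A minimal standalone C++ sketch of the corrected rule follows; the names Op and foldVShiftImm are invented for the example, and std::optional models an undef lane.

// Standalone sketch, not part of the patch: mirrors the corrected folding
// rule for immediate vector shifts over <8 x i16>-style constant operands.
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

enum class Op { Shl, LShr, AShr }; // analogues of VSHLI / VSRLI / VSRAI

static std::vector<uint16_t>
foldVShiftImm(Op Opc, const std::vector<std::optional<uint16_t>> &Src,
              unsigned Amt) {
  std::vector<uint16_t> Out;
  for (const auto &Elt : Src) {
    if (!Elt) {
      // An undef lane must not stay undef: the shift fills the shifted-in
      // bits with zeros (sign copies for AShr), so the result has known
      // bits. Folding the lane to 0 is consistent with all three opcodes.
      Out.push_back(0);
      continue;
    }
    switch (Opc) {
    case Op::Shl:  Out.push_back(uint16_t(*Elt << Amt)); break;
    case Op::LShr: Out.push_back(uint16_t(*Elt >> Amt)); break;
    case Op::AShr: Out.push_back(uint16_t(int16_t(*Elt) >> Amt)); break;
    }
  }
  return Out;
}

int main() {
  // The <8 x i16> operand from test9/test12/test14: undef lanes 2, 3, 5.
  std::vector<std::optional<uint16_t>> Src = {
      15, 8, std::nullopt, std::nullopt, 31, std::nullopt, 64, 128};
  for (uint16_t V : foldVShiftImm(Op::AShr, Src, 3))
    std::printf("%d ", int(V)); // prints: 1 1 0 0 3 0 8 16
  std::printf("\n");
}

Run as written, this prints 1 1 0 0 3 0 8 16, matching the updated [1,1,0,0,3,0,8,16] check lines; the old behaviour left lanes 2, 3, and 5 undefined, which the <1,1,u,u,3,u,8,16> checks encoded as u.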