Index: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h
@@ -545,6 +545,12 @@
     return false;
   }
 
+  /// Return true if inserting a scalar into a variable element of an undef
+  /// vector is more efficiently handled by splatting the scalar instead.
+  virtual bool shouldSplatInsEltVarIndex(EVT) const {
+    return false;
+  }
+
   /// Return true if target supports floating point exceptions.
   bool hasFloatingPointExceptions() const {
     return HasFloatingPointExceptions;
Index: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -15045,12 +15045,19 @@
       InVec == InVal.getOperand(0) && EltNo == InVal.getOperand(1))
     return InVec;
 
-  // We must know which element is being inserted for folds below here.
   auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
-  if (!IndexC)
+  if (!IndexC) {
+    // If this is variable insert to undef vector, it might be better to splat:
+    // inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
+    if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT)) {
+      SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), InVal);
+      return DAG.getBuildVector(VT, DL, Ops);
+    }
     return SDValue();
-  unsigned Elt = IndexC->getZExtValue();
+  }
 
+  // We must know which element is being inserted for folds below here.
+  unsigned Elt = IndexC->getZExtValue();
   if (SDValue Shuf = combineInsertEltToShuffle(N, Elt))
     return Shuf;
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.h
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h
@@ -833,6 +833,8 @@
       return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
     }
 
+    bool shouldSplatInsEltVarIndex(EVT VT) const override;
+
     bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
       return VT.isScalarInteger();
     }
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -4801,6 +4801,12 @@
   return true;
 }
 
+bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
+  // Any legal vector type can be splatted more efficiently than
+  // loading/spilling from memory.
+  return isTypeLegal(VT);
+}
+
 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
   MVT VT = MVT::getIntegerVT(NumBits);
   if (isTypeLegal(VT))
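The regression test below shows the codegen effect. As an illustrative IR-level sketch of what the new combine does (the names %x and %idx are made up here; the combine itself runs on the SelectionDAG and emits a BUILD_VECTOR splat rather than rewriting IR):

    ; A variable-index insert into an undef vector...
    %ins = insertelement <4 x i32> undef, i32 %x, i32 %idx
    ; ...can be treated as a splat of %x: every lane other than %idx is undef,
    ; so defining those lanes to %x as well is a valid refinement.
    %head = insertelement <4 x i32> undef, i32 %x, i32 0
    %splat = shufflevector <4 x i32> %head, <4 x i32> undef, <4 x i32> zeroinitializer

Targets opt in through the new shouldSplatInsEltVarIndex() hook; x86 returns true for any legal vector type, since a broadcast or shuffle is cheaper than the default store-to-stack-and-reload expansion removed from the checks below.
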
Index: llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll
+++ llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll
@@ -6,19 +6,23 @@
 define <16 x i8> @arg_i8_v16i8(i8 %x, i32 %y) nounwind {
 ; SSE-LABEL: arg_i8_v16i8:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: andl $15, %esi
-; SSE-NEXT: movb %dil, -24(%rsp,%rsi)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: arg_i8_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movb %dil, -24(%rsp,%rsi)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pshufb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: arg_i8_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i8_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX2-NEXT: retq
   %ins = insertelement <16 x i8> undef, i8 %x, i32 %y
   ret <16 x i8> %ins
 }
@@ -26,19 +30,23 @@
 define <8 x i16> @arg_i16_v8i16(i16 %x, i32 %y) nounwind {
 ; SSE-LABEL: arg_i16_v8i16:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: andl $7, %esi
-; SSE-NEXT: movw %di, -24(%rsp,%rsi,2)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: arg_i16_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movw %di, -24(%rsp,%rsi,2)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: arg_i16_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i16_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: retq
   %ins = insertelement <8 x i16> undef, i16 %x, i32 %y
   ret <8 x i16> %ins
 }
@@ -46,19 +54,21 @@
 define <4 x i32> @arg_i32_v4i32(i32 %x, i32 %y) nounwind {
 ; SSE-LABEL: arg_i32_v4i32:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: andl $3, %esi
-; SSE-NEXT: movl %edi, -24(%rsp,%rsi,4)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i32_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movl %edi, -24(%rsp,%rsi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_i32_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i32_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT: retq
   %ins = insertelement <4 x i32> undef, i32 %x, i32 %y
   ret <4 x i32> %ins
 }
@@ -66,19 +76,21 @@
 define <2 x i64> @arg_i64_v2i64(i64 %x, i32 %y) nounwind {
 ; SSE-LABEL: arg_i64_v2i64:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: andl $1, %esi
-; SSE-NEXT: movq %rdi, -24(%rsp,%rsi,8)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movq %rdi, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i64_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $1, %esi
-; AVX-NEXT: movq %rdi, -24(%rsp,%rsi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_i64_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i64_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: retq
   %ins = insertelement <2 x i64> undef, i64 %x, i32 %y
   ret <2 x i64> %ins
 }
@@ -86,19 +98,18 @@
 define <4 x float> @arg_f32_v4f32(float %x, i32 %y) nounwind {
 ; SSE-LABEL: arg_f32_v4f32:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $edi killed $edi def $rdi
-; SSE-NEXT: andl $3, %edi
-; SSE-NEXT: movss %xmm0, -24(%rsp,%rdi,4)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f32_v4f32:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: andl $3, %edi
-; AVX-NEXT: vmovss %xmm0, -24(%rsp,%rdi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_f32_v4f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_f32_v4f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
+; AVX2-NEXT: retq
   %ins = insertelement <4 x float> undef, float %x, i32 %y
   ret <4 x float> %ins
 }
@@ -106,18 +117,12 @@
 define <2 x double> @arg_f64_v2f64(double %x, i32 %y) nounwind {
 ; SSE-LABEL: arg_f64_v2f64:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $edi killed $edi def $rdi
-; SSE-NEXT: andl $1, %edi
-; SSE-NEXT: movsd %xmm0, -24(%rsp,%rdi,8)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: arg_f64_v2f64:
 ; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: andl $1, %edi
-; AVX-NEXT: vmovsd %xmm0, -24(%rsp,%rdi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; AVX-NEXT: retq
   %ins = insertelement <2 x double> undef, double %x, i32 %y
   ret <2 x double> %ins
@@ -126,21 +131,24 @@
 define <16 x i8> @load_i8_v16i8(i8* %p, i32 %y) nounwind {
 ; SSE-LABEL: load_i8_v16i8:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movb (%rdi), %al
-; SSE-NEXT: andl $15, %esi
-; SSE-NEXT: movb %al, -24(%rsp,%rsi)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: load_i8_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movb (%rdi), %al
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movb %al, -24(%rsp,%rsi)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; SSE-NEXT: movzbl (%rdi), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pshufb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: load_i8_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movzbl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_i8_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb (%rdi), %xmm0
+; AVX2-NEXT: retq
   %x = load i8, i8* %p
   %ins = insertelement <16 x i8> undef, i8 %x, i32 %y
   ret <16 x i8> %ins
@@ -149,21 +157,24 @@
 define <8 x i16> @load_i16_v8i16(i16* %p, i32 %y) nounwind {
 ; SSE-LABEL: load_i16_v8i16:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
 ; SSE-NEXT: movzwl (%rdi), %eax
-; SSE-NEXT: andl $7, %esi
-; SSE-NEXT: movw %ax, -24(%rsp,%rsi,2)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: load_i16_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movzwl (%rdi), %eax
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movw %ax, -24(%rsp,%rsi,2)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: load_i16_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movzwl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_i16_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX2-NEXT: retq
   %x = load i16, i16* %p
   %ins = insertelement <8 x i16> undef, i16 %x, i32 %y
   ret <8 x i16> %ins
@@ -172,20 +183,13 @@
 define <4 x i32> @load_i32_v4i32(i32* %p, i32 %y) nounwind {
 ; SSE-LABEL: load_i32_v4i32:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movl (%rdi), %eax
-; SSE-NEXT: andl $3, %esi
-; SSE-NEXT: movl %eax, -24(%rsp,%rsi,4)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: load_i32_v4i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movl (%rdi), %eax
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movl %eax, -24(%rsp,%rsi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: vbroadcastss (%rdi), %xmm0
 ; AVX-NEXT: retq
   %x = load i32, i32* %p
   %ins = insertelement <4 x i32> undef, i32 %x, i32 %y
@@ -195,21 +199,20 @@
 define <2 x i64> @load_i64_v2i64(i64* %p, i32 %y) nounwind {
 ; SSE-LABEL: load_i64_v2i64:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: andl $1, %esi
-; SSE-NEXT: movq %rax, -24(%rsp,%rsi,8)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i64_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: andl $1, %esi
-; AVX-NEXT: movq %rax, -24(%rsp,%rsi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: load_i64_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_i64_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: retq
   %x = load i64, i64* %p
   %ins = insertelement <2 x i64> undef, i64 %x, i32 %y
   ret <2 x i64> %ins
@@ -218,20 +221,13 @@
 define <4 x float> @load_f32_v4f32(float* %p, i32 %y) nounwind {
 ; SSE-LABEL: load_f32_v4f32:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
 ; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: andl $3, %esi
-; SSE-NEXT: movss %xmm0, -24(%rsp,%rsi,4)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: load_f32_v4f32:
 ; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: vmovss %xmm0, -24(%rsp,%rsi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: vbroadcastss (%rdi), %xmm0
 ; AVX-NEXT: retq
   %x = load float, float* %p
   %ins = insertelement <4 x float> undef, float %x, i32 %y
@@ -241,20 +237,12 @@
 define <2 x double> @load_f64_v2f64(double* %p, i32 %y) nounwind {
 ; SSE-LABEL: load_f64_v2f64:
 ; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: andl $1, %esi
-; SSE-NEXT: movsd %xmm0, -24(%rsp,%rsi,8)
-; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: load_f64_v2f64:
 ; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: andl $1, %esi
-; AVX-NEXT: vmovsd %xmm0, -24(%rsp,%rsi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX-NEXT: retq
   %x = load double, double* %p
   %ins = insertelement <2 x double> undef, double %x, i32 %y
@@ -277,19 +265,19 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i8_v32i8:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $31, %esi
-; AVX-NEXT: movb %dil, (%rsp,%rsi)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_i8_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i8_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
+; AVX2-NEXT: retq
   %ins = insertelement <32 x i8> undef, i8 %x, i32 %y
   ret <32 x i8> %ins
 }
@@ -310,19 +298,19 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i16_v16i16:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movw %di, (%rsp,%rsi,2)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_i16_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i16_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: retq
   %ins = insertelement <16 x i16> undef, i16 %x, i32 %y
   ret <16 x i16> %ins
 }
@@ -343,19 +331,18 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i32_v8i32:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movl %edi, (%rsp,%rsi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_i32_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i32_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
+; AVX2-NEXT: retq
   %ins = insertelement <8 x i32> undef, i32 %x, i32 %y
   ret <8 x i32> %ins
 }
@@ -376,19 +363,18 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i64_v4i64:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movq %rdi, (%rsp,%rsi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_i64_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_i64_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
+; AVX2-NEXT: retq
   %ins = insertelement <4 x i64> undef, i64 %x, i32 %y
   ret <4 x i64> %ins
 }
@@ -409,19 +395,16 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f32_v8f32:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: andl $7, %edi
-; AVX-NEXT: vmovss %xmm0, (%rsp,%rdi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_f32_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_f32_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX2-NEXT: retq
   %ins = insertelement <8 x float> undef, float %x, i32 %y
   ret <8 x float> %ins
 }
@@ -442,19 +425,16 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f64_v4f64:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: andl $3, %edi
-; AVX-NEXT: vmovsd %xmm0, (%rsp,%rdi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: arg_f64_v4f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: arg_f64_v4f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT: retq
   %ins = insertelement <4 x double> undef, double %x, i32 %y
   ret <4 x double> %ins
 }
@@ -476,20 +456,19 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i8_v32i8:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movb (%rdi), %al
-; AVX-NEXT: andl $31, %esi
-; AVX-NEXT: movb %al, (%rsp,%rsi)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: load_i8_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movzbl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_i8_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb (%rdi), %ymm0
+; AVX2-NEXT: retq
   %x = load i8, i8* %p
   %ins = insertelement <32 x i8> undef, i8 %x, i32 %y
   ret <32 x i8> %ins
@@ -512,20 +491,19 @@
 ; SSE-NEXT: popq %rbp
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i16_v16i16:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movzwl (%rdi), %eax
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movw %ax, (%rsp,%rsi,2)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1-LABEL: load_i16_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movzwl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_i16_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX2-NEXT: retq
   %x = load i16, i16* %p
   %ins = insertelement <16 x i16> undef, i16 %x, i32 %y
   ret <16 x i16> %ins
@@ -550,17 +528,7 @@
 ;
 ; AVX-LABEL: load_i32_v8i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movl (%rdi), %eax
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movl %eax, (%rsp,%rsi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: vbroadcastss (%rdi), %ymm0
 ; AVX-NEXT: retq
   %x = load i32, i32* %p
   %ins = insertelement <8 x i32> undef, i32 %x, i32 %y
@@ -586,17 +554,7 @@
 ;
 ; AVX-LABEL: load_i64_v4i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movq %rax, (%rsp,%rsi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT: retq
   %x = load i64, i64* %p
   %ins = insertelement <4 x i64> undef, i64 %x, i32 %y
@@ -622,17 +580,7 @@
 ;
 ; AVX-LABEL: load_f32_v8f32:
 ; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: vmovss %xmm0, (%rsp,%rsi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: vbroadcastss (%rdi), %ymm0
 ; AVX-NEXT: retq
   %x = load float, float* %p
   %ins = insertelement <8 x float> undef, float %x, i32 %y
@@ -658,17 +606,7 @@
 ;
 ; AVX-LABEL: load_f64_v4f64:
 ; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: vmovsd %xmm0, (%rsp,%rsi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT: retq
   %x = load double, double* %p
   %ins = insertelement <4 x double> undef, double %x, i32 %y
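
Note (illustrative, not taken from the test file): the combine only fires when the source vector is undef, per the InVec.isUndef() check above. A variable-index insert into a live vector, e.g.

    %ins = insertelement <4 x i32> %v, i32 %x, i32 %idx

is left alone and still takes the store/reload path, since splatting %x would overwrite the lanes of %v that must be preserved.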