Index: lib/Target/X86/X86InstrSSE.td
===================================================================
--- lib/Target/X86/X86InstrSSE.td
+++ lib/Target/X86/X86InstrSSE.td
@@ -6397,6 +6397,50 @@
   return getI8Imm(Imm ^ 0xff, SDLoc(N));
 }]>;
 
+// Turn a 4-bit blendi immediate to 8-bit for use with pblendw.
+def BlendScaleImm4 : SDNodeXForm<imm, [{
+  uint8_t Imm = N->getZExtValue();
+  uint8_t NewImm = 0;
+  for (unsigned i = 0; i != 4; ++i) {
+    if (Imm & (1 << i))
+      NewImm |= 0x3 << (i * 2);
+  }
+  return getI8Imm(NewImm, SDLoc(N));
+}]>;
+
+// Turn a 2-bit blendi immediate to 8-bit for use with pblendw.
+def BlendScaleImm2 : SDNodeXForm<imm, [{
+  uint8_t Imm = N->getZExtValue();
+  uint8_t NewImm = 0;
+  for (unsigned i = 0; i != 2; ++i) {
+    if (Imm & (1 << i))
+      NewImm |= 0xf << (i * 4);
+  }
+  return getI8Imm(NewImm, SDLoc(N));
+}]>;
+
+// Turn a 4-bit blendi immediate to 8-bit for use with pblendw and invert it.
+def BlendScaleCommuteImm4 : SDNodeXForm<imm, [{
+  uint8_t Imm = N->getZExtValue();
+  uint8_t NewImm = 0;
+  for (unsigned i = 0; i != 4; ++i) {
+    if (Imm & (1 << i))
+      NewImm |= 0x3 << (i * 2);
+  }
+  return getI8Imm(NewImm ^ 0xff, SDLoc(N));
+}]>;
+
+// Turn a 2-bit blendi immediate to 8-bit for use with pblendw and invert it.
+def BlendScaleCommuteImm2 : SDNodeXForm<imm, [{
+  uint8_t Imm = N->getZExtValue();
+  uint8_t NewImm = 0;
+  for (unsigned i = 0; i != 2; ++i) {
+    if (Imm & (1 << i))
+      NewImm |= 0xf << (i * 4);
+  }
+  return getI8Imm(NewImm ^ 0xff, SDLoc(N));
+}]>;
+
 let Predicates = [HasAVX] in {
   let isCommutable = 0 in {
   defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
@@ -6507,7 +6551,7 @@
                                   VEX_4V, VEX_L, VEX_WIG;
 }
 
-// Emulate vXi32/vXi64 blends with vXf32/vXf64.
+// Emulate vXi32/vXi64 blends with vXf32/vXf64 or pblendw.
 // ExecutionDomainFixPass will cleanup domains later on.
 let Predicates = [HasAVX] in {
 def : Pat<(X86Blendi (v4i64 VR256:$src1), (v4i64 VR256:$src2), imm:$src3),
@@ -6517,12 +6561,14 @@
 def : Pat<(X86Blendi (loadv4i64 addr:$src2), VR256:$src1, imm:$src3),
           (VBLENDPDYrmi VR256:$src1, addr:$src2, (BlendCommuteImm4 imm:$src3))>;
 
+// Use pblendw for 128-bit integer to keep it in the integer domain and prevent
+// it from becoming movsd via commuting under optsize.
 def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), imm:$src3),
-          (VBLENDPDrri VR128:$src1, VR128:$src2, imm:$src3)>;
+          (VPBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm2 imm:$src3))>;
 def : Pat<(X86Blendi VR128:$src1, (loadv2i64 addr:$src2), imm:$src3),
-          (VBLENDPDrmi VR128:$src1, addr:$src2, imm:$src3)>;
+          (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm2 imm:$src3))>;
 def : Pat<(X86Blendi (loadv2i64 addr:$src2), VR128:$src1, imm:$src3),
-          (VBLENDPDrmi VR128:$src1, addr:$src2, (BlendCommuteImm2 imm:$src3))>;
+          (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2 imm:$src3))>;
 }
 
 let Predicates = [HasAVX1Only] in {
 def : Pat<(X86Blendi (v8i32 VR256:$src1), (v8i32 VR256:$src2), imm:$src3),
@@ -6533,12 +6579,14 @@
 def : Pat<(X86Blendi (loadv8i32 addr:$src2), VR256:$src1, imm:$src3),
           (VBLENDPSYrmi VR256:$src1, addr:$src2, (BlendCommuteImm8 imm:$src3))>;
 
+// Use pblendw for 128-bit integer to keep it in the integer domain and prevent
+// it from becoming movss via commuting under optsize.
 def : Pat<(X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2), imm:$src3),
-          (VBLENDPSrri VR128:$src1, VR128:$src2, imm:$src3)>;
+          (VPBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm4 imm:$src3))>;
 def : Pat<(X86Blendi VR128:$src1, (loadv4i32 addr:$src2), imm:$src3),
-          (VBLENDPSrmi VR128:$src1, addr:$src2, imm:$src3)>;
+          (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm4 imm:$src3))>;
 def : Pat<(X86Blendi (loadv4i32 addr:$src2), VR128:$src1, imm:$src3),
-          (VBLENDPSrmi VR128:$src1, addr:$src2, (BlendCommuteImm4 imm:$src3))>;
+          (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm4 imm:$src3))>;
 }
 
 defm BLENDPS : SS41I_blend_rmi<0x0C, "blendps", X86Blendi, v4f32,
@@ -6552,19 +6600,21 @@
                                SchedWriteBlend.XMM, BlendCommuteImm8>;
 
 let Predicates = [UseSSE41] in {
+// Use pblendw for 128-bit integer to keep it in the integer domain and prevent
+// it from becoming movss via commuting under optsize.
 def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), imm:$src3),
-          (BLENDPDrri VR128:$src1, VR128:$src2, imm:$src3)>;
+          (PBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm2 imm:$src3))>;
 def : Pat<(X86Blendi VR128:$src1, (memopv2i64 addr:$src2), imm:$src3),
-          (BLENDPDrmi VR128:$src1, addr:$src2, imm:$src3)>;
+          (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm2 imm:$src3))>;
 def : Pat<(X86Blendi (memopv2i64 addr:$src2), VR128:$src1, imm:$src3),
-          (BLENDPDrmi VR128:$src1, addr:$src2, (BlendCommuteImm2 imm:$src3))>;
+          (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2 imm:$src3))>;
 
 def : Pat<(X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2), imm:$src3),
-          (BLENDPSrri VR128:$src1, VR128:$src2, imm:$src3)>;
+          (PBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm4 imm:$src3))>;
 def : Pat<(X86Blendi VR128:$src1, (memopv4i32 addr:$src2), imm:$src3),
-          (BLENDPSrmi VR128:$src1, addr:$src2, imm:$src3)>;
+          (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm4 imm:$src3))>;
 def : Pat<(X86Blendi (memopv4i32 addr:$src2), VR128:$src1, imm:$src3),
-          (BLENDPSrmi VR128:$src1, addr:$src2, (BlendCommuteImm4 imm:$src3))>;
+          (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm4 imm:$src3))>;
 }
 
 // For insertion into the zero index (low half) of a 256-bit vector, it is
Index: test/CodeGen/X86/avx512-shuffles/partial_permute.ll
===================================================================
--- test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -1913,7 +1913,7 @@
 ; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mem_mask1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],mem[4,5,6,7]
 ; CHECK-NEXT:    vptestnmq %xmm1, %xmm1, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm2, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1928,7 +1928,7 @@
 ; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mem_mask1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa 16(%rdi), %xmm1
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
 ; CHECK-NEXT:    vptestnmq %xmm0, %xmm0, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2564,7 +2564,7 @@
 ; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa 32(%rdi), %xmm2
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],mem[4,5,6,7]
 ; CHECK-NEXT:    vptestnmq %xmm1, %xmm1, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm2, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -2579,7 +2579,7 @@
 ; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa 32(%rdi), %xmm1
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
 ; CHECK-NEXT:    vptestnmq %xmm0, %xmm0, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
Index: test/CodeGen/X86/combine-sdiv.ll
===================================================================
--- test/CodeGen/X86/combine-sdiv.ll
+++ test/CodeGen/X86/combine-sdiv.ll
@@ -1580,7 +1580,7 @@
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
@@ -1593,7 +1593,7 @@
 ; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %xmm2, %xmm2
 ; AVX512F-NEXT:    vpaddq %xmm2, %xmm0, %xmm2
 ; AVX512F-NEXT:    vpsravq %zmm1, %zmm2, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -1606,7 +1606,7 @@
 ; AVX512BW-NEXT:    vmovq %rax, %xmm2
 ; AVX512BW-NEXT:    vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vpsravq %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX512BW-NEXT:    retq
 ;
 ; XOP-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
@@ -2498,7 +2498,7 @@
 ; AVX2-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vpsraw $1, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsraw $2, %xmm0, %xmm2
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX2-NEXT:    vpsrlw $15, %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
@@ -2508,7 +2508,7 @@
 ; AVX512F-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512F-NEXT:    vpsraw $1, %xmm0, %xmm1
 ; AVX512F-NEXT:    vpsraw $2, %xmm0, %xmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX512F-NEXT:    vpsrlw $15, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    retq
@@ -2580,7 +2580,7 @@
 ; AVX2-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    vpsraw $8, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsraw $4, %xmm0, %xmm2
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX2-NEXT:    vpsrlw $15, %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
@@ -2591,7 +2591,7 @@
 ; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    vpsraw $8, %xmm0, %xmm1
 ; AVX512F-NEXT:    vpsraw $4, %xmm0, %xmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX512F-NEXT:    vpsrlw $15, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    retq
@@ -2665,7 +2665,7 @@
 ; AVX2-NEXT:    vpsubw %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    vpsraw $8, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsraw $4, %xmm0, %xmm2
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX2-NEXT:    vpsrlw $15, %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
@@ -2676,7 +2676,7 @@
 ; AVX512F-NEXT:    vpsubw %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    vpsraw $8, %xmm0, %xmm1
 ; AVX512F-NEXT:    vpsraw $4, %xmm0, %xmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX512F-NEXT:    vpsrlw $15, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    retq
@@ -2949,26 +2949,12 @@
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: combine_vec_sdiv_nonuniform7:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2ORLATER-LABEL: combine_vec_sdiv_nonuniform7:
-; AVX2ORLATER:       # %bb.0:
-; AVX2ORLATER-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2ORLATER-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
-; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2ORLATER-NEXT:    retq
-;
-; XOP-LABEL: combine_vec_sdiv_nonuniform7:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; XOP-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
-; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; XOP-NEXT:    retq
+; AVX-LABEL: combine_vec_sdiv_nonuniform7:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX-NEXT:    retq
   %1 = sdiv <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 1, i16 1, i16 1, i16 1>
   ret <8 x i16> %1
 }
Index: test/CodeGen/X86/commute-blend-sse41.ll
===================================================================
--- test/CodeGen/X86/commute-blend-sse41.ll
+++ test/CodeGen/X86/commute-blend-sse41.ll
@@ -54,11 +54,11 @@
 define void @baz(<2 x i64>* %arg, %struct.spam* %arg1) optsize {
 ; CHECK-LABEL: baz:
 ; CHECK:       # %bb.0: # %bb
-; CHECK-NEXT:    movapd (%rdi), %xmm0
-; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [3,3]
-; CHECK-NEXT:    andpd %xmm0, %xmm1
-; CHECK-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; CHECK-NEXT:    movupd %xmm1, (%rsi)
+; CHECK-NEXT:    movaps (%rdi), %xmm0
+; CHECK-NEXT:    movaps {{.*#+}} xmm1 = [3,3]
+; CHECK-NEXT:    andps %xmm0, %xmm1
+; CHECK-NEXT:    blendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
+; CHECK-NEXT:    movups %xmm1, (%rsi)
 ; CHECK-NEXT:    retq
 bb:
   %tmp = load <2 x i64>, <2 x i64>* %arg, align 16
Index: test/CodeGen/X86/insertelement-ones.ll
===================================================================
--- test/CodeGen/X86/insertelement-ones.ll
+++ test/CodeGen/X86/insertelement-ones.ll
@@ -30,23 +30,11 @@
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: insert_v2i64_x1:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: insert_v2i64_x1:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: insert_v2i64_x1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX512-NEXT:    retq
+; AVX-LABEL: insert_v2i64_x1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX-NEXT:    retq
   %1 = insertelement <2 x i64> %a, i64 -1, i32 0
   ret <2 x i64> %1
 }
Index: test/CodeGen/X86/lower-vec-shift.ll
===================================================================
--- test/CodeGen/X86/lower-vec-shift.ll
+++ test/CodeGen/X86/lower-vec-shift.ll
@@ -43,19 +43,12 @@
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: test2:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT:    vpsrlw $3, %xmm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: test2:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrlw $2, %xmm0, %xmm1
-; AVX2-NEXT:    vpsrlw $3, %xmm0, %xmm0
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT:    retq
+; AVX-LABEL: test2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $2, %xmm0, %xmm1
+; AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT:    retq
   %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
   ret <8 x i16> %lshr
 }
@@ -143,19 +136,12 @@
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: test6:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT:    vpsraw $3, %xmm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: test6:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsraw $2, %xmm0, %xmm1
-; AVX2-NEXT:    vpsraw $3, %xmm0, %xmm0
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT:    retq
+; AVX-LABEL: test6:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsraw $2, %xmm0, %xmm1
+; AVX-NEXT:    vpsraw $3, %xmm0, %xmm0
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT:    retq
   %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
   ret <8 x i16> %lshr
 }
Index: test/CodeGen/X86/sse2.ll
===================================================================
--- test/CodeGen/X86/sse2.ll
+++ test/CodeGen/X86/sse2.ll
@@ -694,23 +694,14 @@
 ; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64-SSE-NEXT:    retq
 ;
-; X64-AVX1-LABEL: PR19721:
-; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vmovq %xmm0, %rax
-; X64-AVX1-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
-; X64-AVX1-NEXT:    andq %rax, %rcx
-; X64-AVX1-NEXT:    vmovq %rcx, %xmm1
-; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; X64-AVX1-NEXT:    retq
-;
-; X64-AVX512-LABEL: PR19721:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovq %xmm0, %rax
-; X64-AVX512-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
-; X64-AVX512-NEXT:    andq %rax, %rcx
-; X64-AVX512-NEXT:    vmovq %rcx, %xmm1
-; X64-AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; X64-AVX512-NEXT:    retq
+; X64-AVX-LABEL: PR19721:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq %xmm0, %rax
+; X64-AVX-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
+; X64-AVX-NEXT:    andq %rax, %rcx
+; X64-AVX-NEXT:    vmovq %rcx, %xmm1
+; X64-AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX-NEXT:    retq
   %bc = bitcast <4 x i32> %i to i128
   %insert = and i128 %bc, -4294967296
   %bc2 = bitcast i128 %insert to <4 x i32>
Index: test/CodeGen/X86/vector-narrow-binop.ll
===================================================================
--- test/CodeGen/X86/vector-narrow-binop.ll
+++ test/CodeGen/X86/vector-narrow-binop.ll
@@ -112,29 +112,13 @@
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: PR39893:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: PR39893:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: PR39893:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
-; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512-NEXT:    retq
+; AVX-LABEL: PR39893:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT:    retq
   %sub = sub <2 x i32> <i32 0, i32 undef>, %x
   %bc = bitcast <2 x i32> %sub to <8 x i8>
   %shuffle = shufflevector <8 x i8> %y, <8 x i8> %bc, <2 x i32> <i32 10, i32 4>
Index: test/CodeGen/X86/vector-shuffle-128-v4.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2136,17 +2136,11 @@
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: insert_reg_lo_v4i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovq %rdi, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2OR512VL-LABEL: insert_reg_lo_v4i32:
-; AVX2OR512VL:       # %bb.0:
-; AVX2OR512VL-NEXT:    vmovq %rdi, %xmm1
-; AVX2OR512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2OR512VL-NEXT:    retq
+; AVX-LABEL: insert_reg_lo_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq %rdi, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX-NEXT:    retq
   %a.cast = bitcast i64 %a to <2 x i32>
   %v = shufflevector <2 x i32> %a.cast, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %shuffle = shufflevector <4 x i32> %v, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
Index: test/CodeGen/X86/vector-shuffle-128-v8.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-128-v8.ll
+++ test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -1155,7 +1155,7 @@
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    retq
 ;
 ; AVX2-FAST-LABEL: shuffle_v8i16_0213cedf:
@@ -1169,7 +1169,7 @@
 ; AVX512VL-SLOW:       # %bb.0:
 ; AVX512VL-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,7]
 ; AVX512VL-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
-; AVX512VL-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VL-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX512VL-SLOW-NEXT:    retq
 ;
 ; AVX512VL-FAST-LABEL: shuffle_v8i16_0213cedf:
@@ -1266,17 +1266,11 @@
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: shuffle_v8i16_032dXXXX:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
-; AVX1-NEXT:    retq
-;
-; AVX2OR512VL-LABEL: shuffle_v8i16_032dXXXX:
-; AVX2OR512VL:       # %bb.0:
-; AVX2OR512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2OR512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
-; AVX2OR512VL-NEXT:    retq
+; AVX-LABEL: shuffle_v8i16_032dXXXX:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
+; AVX-NEXT:    retq
   %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x i16> %shuffle
 }
@@ -1424,17 +1418,11 @@
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: shuffle_v8i16_012dcde3:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2OR512VL-LABEL: shuffle_v8i16_012dcde3:
-; AVX2OR512VL:       # %bb.0:
-; AVX2OR512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2OR512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
-; AVX2OR512VL-NEXT:    retq
+; AVX-LABEL: shuffle_v8i16_012dcde3:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
+; AVX-NEXT:    retq
   %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 12, i32 13, i32 14, i32 3>
   ret <8 x i16> %shuffle
 }
@@ -1561,19 +1549,12 @@
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    retq
-;
-; AVX2OR512VL-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX2OR512VL:       # %bb.0:
-; AVX2OR512VL-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
-; AVX2OR512VL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX2OR512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2OR512VL-NEXT:    retq
+; AVX-LABEL: shuffle_v8i16_XX4X8acX:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT:    retq
   %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 undef>
   ret <8 x i16> %shuffle
 }
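Note for reviewers: the four BlendScale* transforms above all apply the same rescaling. A vXi32/vXi64 blendi immediate carries one bit per source element, while pblendw's immediate carries one bit per i16 word, so each element bit is widened into a run of two or four word bits; the Commute variants additionally XOR with 0xff because swapping the two sources flips which operand every lane selects. A minimal standalone C++ model of that logic, assuming an illustrative helper name (scaleBlendImm is not part of the patch):

#include <cassert>
#include <cstdint>

// Widen each element-select bit of a vXi32/vXi64 blend immediate into the
// 8/NumElts pblendw word-select bits covering the same bytes.
static uint8_t scaleBlendImm(uint8_t Imm, unsigned NumElts) {
  unsigned WordsPerElt = 8 / NumElts;        // 4 for v2i64, 2 for v4i32
  uint8_t EltMask = (1 << WordsPerElt) - 1;  // 0xf for v2i64, 0x3 for v4i32
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    if (Imm & (1 << i))
      NewImm |= EltMask << (i * WordsPerElt);
  return NewImm;
}

int main() {
  // v2i64 immediate 0b10 (take element 1 from src2) selects words 4..7: 0xf0.
  assert(scaleBlendImm(0x2, 2) == 0xf0);
  // v4i32 immediate 0b0101 becomes 0b00110011.
  assert(scaleBlendImm(0x5, 4) == 0x33);
  // The BlendScaleCommuteImm* forms invert the scaled result (NewImm ^ 0xff).
  assert((scaleBlendImm(0x2, 2) ^ 0xff) == 0x0f);
  return 0;
}

This is exactly the change visible in the test updates: a v2i64 blend that previously printed as vpblendd xmm0[0,1],xmm1[2,3] (dword immediate 0xC) now prints as vpblendw xmm0[0,1,2,3],xmm1[4,5,6,7] (word immediate 0xF0).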