Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -27356,6 +27356,41 @@
       Other = LHS;
     }
 
+    if (Other.getNode() && Other.getOpcode() == ISD::TRUNCATE &&
+        Other.getOperand(0).getOpcode() == ISD::SUB && isUnsignedIntSetCC(CC)) {
+      // x >= y ? x - y : 0, where y is wider than x. We can truncate y to the
+      // width of x, because y only reaches the result when it is less than x,
+      // i.e. when it fits in x's type.
+      SDValue SubXY = Other.getOperand(0);
+      EVT ExtType = SubXY.getValueType();
+
+      SDValue SubLHS = SubXY.getOperand(0), SubRHS = SubXY.getOperand(1);
+      SDValue CondLHS = Cond.getOperand(0), CondRHS = Cond.getOperand(1);
+      ISD::CondCode NewCC = CC;
+      // The same pattern can occur with the setcc operands swapped:
+      //   x < y ? y - x : 0  ->  y > x ? y - x : 0
+      if (CC == ISD::SETULE || CC == ISD::SETULT) {
+        std::swap(CondLHS, CondRHS);
+        NewCC = CC == ISD::SETULE ? ISD::SETUGE : ISD::SETUGT;
+      }
+      if (CondLHS.getOpcode() == ISD::ZERO_EXTEND &&
+          DAG.isEqualTo(SubRHS, CondRHS) && DAG.isEqualTo(SubLHS, CondLHS)) {
+        // Clamp y to the unsigned maximum of the narrow type first: above the
+        // limit both the select and the saturating subtract produce zero, and
+        // below it the truncation of y is lossless.
+        SDValue SaturationConst = DAG.getConstant(
+            APInt::getMaxValue(VT.getScalarSizeInBits()).getLimitedValue(),
+            SDLoc(SubRHS), ExtType);
+        SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubRHS), ExtType, SubRHS,
+                                   SaturationConst);
+        Other = DAG.getNode(ISD::SUB, SDLoc(SubXY), VT, CondLHS.getOperand(0),
+                            DAG.getZExtOrTrunc(UMin, SDLoc(SubRHS), VT));
+        Cond = DAG.getSetCC(SDLoc(Cond), VT, Other.getOperand(0),
+                            Other.getOperand(1), NewCC);
+        CC = NewCC;
+      }
+    }
+
     if (Other.getNode() && Other->getNumOperands() == 2 &&
         DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
       SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
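
The equivalence the new combine relies on can be checked on scalars. The sketch below is
illustrative only and not part of the patch; selectForm/psubusForm are invented names, with an
i16 lane and an i32 subtrahend standing in for the vector types:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  // Original pattern: zext(x) >= y ? trunc(zext(x) - y) : 0.
  static uint16_t selectForm(uint16_t x, uint32_t y) {
    return uint32_t(x) >= y ? uint16_t(uint32_t(x) - y) : 0;
  }

  // Rewritten pattern: psubus(x, trunc(umin(y, 65535))). Clamping y first
  // makes the truncation lossless whenever the subtract can be non-zero;
  // if y exceeds 65535, both forms yield zero.
  static uint16_t psubusForm(uint16_t x, uint32_t y) {
    uint16_t YTrunc = uint16_t(std::min<uint32_t>(y, 65535));
    return x >= YTrunc ? uint16_t(x - YTrunc) : 0; // unsigned saturating sub
  }

  int main() {
    for (uint32_t x = 0; x <= 0xFFFF; x += 257)
      for (uint32_t y = 0; y <= 0x1FFFF; y += 131)
        assert(selectForm(uint16_t(x), y) == psubusForm(uint16_t(x), y));
    return 0;
  }
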
Index: test/CodeGen/X86/psubus.ll
===================================================================
--- test/CodeGen/X86/psubus.ll
+++ test/CodeGen/X86/psubus.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
 ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
@@ -513,3 +514,665 @@
   store <32 x i8> %6, <32 x i8>* %2, align 1
   ret void
 }
+
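+; zext(x) u< y ? 0 : trunc(zext(x) - y), i.e. an unsigned saturating subtract
+; with a wider (i32) subtrahend; should lower to psubusw with y clamped via umin.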
+define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
+; SSE2-LABEL: test13:
+; SSE2: ## BB#0: ## %vector.ph
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: movdqu (%rsi), %xmm1
+; SSE2-NEXT: movdqu 16(%rsi), %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovbl %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm2, %xmm3
+; SSE2-NEXT: psubusw %xmm3, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test13:
+; SSSE3: ## BB#0: ## %vector.ph
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: movdqu (%rsi), %xmm1
+; SSSE3-NEXT: movdqu 16(%rsi), %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,2,3]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm3, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[3,1,2,3]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,2,3]
+; SSSE3-NEXT: movd %xmm5, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm5
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSSE3-NEXT: movd %xmm1, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movd %xmm1, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovbl %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSSE3-NEXT: pshufb %xmm3, %xmm4
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
+; SSSE3-NEXT: psubusw %xmm4, %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: test13:
+; AVX1: ## BB#0: ## %vector.ph
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vmovups (%rsi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test13:
+; AVX2: ## BB#0: ## %vector.ph
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpminud (%rsi), %ymm1, %ymm1
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+vector.ph:
+  %0 = getelementptr inbounds i16, i16* %head, i64 0
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %2 = load <8 x i16>, <8 x i16>* %1, align 2
+  %3 = getelementptr inbounds i32, i32* %w, i64 0
+  %4 = bitcast i32* %3 to <8 x i32>*
+  %5 = load <8 x i32>, <8 x i32>* %4, align 2
+  %6 = zext <8 x i16> %2 to <8 x i32>
+  %7 = icmp ult <8 x i32> %6, %5
+  %8 = sub <8 x i32> %6, %5
+  %9 = trunc <8 x i32> %8 to <8 x i16>
+  %10 = select <8 x i1> %7, <8 x i16> zeroinitializer, <8 x i16> %9
+  store <8 x i16> %10, <8 x i16>* %1, align 1
+  ret void
+}
+
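+; The same saturating-subtract pattern with i8 lanes and an i32 subtrahend
+; should lower to psubusb.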
+define void @test14(i8* nocapture %head, i32* nocapture %w) nounwind {
+; SSE-LABEL: test14:
+; SSE: ## BB#0: ## %vector.ph
+; SSE-NEXT: movdqu (%rdi), %xmm0
+; SSE-NEXT: movdqu (%rsi), %xmm1
+; SSE-NEXT: movdqu 16(%rsi), %xmm3
+; SSE-NEXT: movdqu 32(%rsi), %xmm5
+; SSE-NEXT: movdqu 48(%rsi), %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3]
+; SSE-NEXT: movd %xmm4, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: movl $255, %eax
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,2,3]
+; SSE-NEXT: movd %xmm6, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; SSE-NEXT: movd %xmm2, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE-NEXT: movd %xmm2, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[3,1,2,3]
+; SSE-NEXT: movd %xmm4, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,2,3]
+; SSE-NEXT: movd %xmm7, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; SSE-NEXT: movd %xmm5, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
+; SSE-NEXT: movd %xmm5, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm5
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[3,1,2,3]
+; SSE-NEXT: movd %xmm5, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,3]
+; SSE-NEXT: movd %xmm6, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE-NEXT: movd %xmm3, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE-NEXT: movd %xmm3, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; SSE-NEXT: movd %xmm3, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,2,3]
+; SSE-NEXT: movd %xmm6, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; SSE-NEXT: movd %xmm1, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovael %eax, %ecx
+; SSE-NEXT: movd %ecx, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE-NEXT: movd %xmm1, %ecx
+; SSE-NEXT: cmpl $255, %ecx
+; SSE-NEXT: cmovbl %ecx, %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: packuswb %xmm5, %xmm3
+; SSE-NEXT: packuswb %xmm4, %xmm3
+; SSE-NEXT: psubusb %xmm3, %xmm0
+; SSE-NEXT: movdqu %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test14:
+; AVX1: ## BB#0: ## %vector.ph
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vmovups (%rsi), %ymm1
+; AVX1-NEXT: vmovups 32(%rsi), %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpminud %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpminud %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpminud %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test14:
+; AVX2: ## BB#0: ## %vector.ph
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpminud 32(%rsi), %ymm1, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vpminud (%rsi), %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+vector.ph:
+  %0 = getelementptr inbounds i8, i8* %head, i64 0
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %2 = load <16 x i8>, <16 x i8>* %1, align 2
+  %3 = getelementptr inbounds i32, i32* %w, i64 0
+  %4 = bitcast i32* %3 to <16 x i32>*
+  %5 = load <16 x i32>, <16 x i32>* %4, align 2
+  %6 = zext <16 x i8> %2 to <16 x i32>
+  %7 = icmp ult <16 x i32> %6, %5
+  %8 = sub <16 x i32> %6, %5
+  %9 = trunc <16 x i32> %8 to <16 x i8>
+  %10 = select <16 x i1> %7, <16 x i8> zeroinitializer, <16 x i8> %9
+  store <16 x i8> %10, <16 x i8>* %1, align 1
+  ret void
+}
+
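+; The same pattern written with the inverted predicate,
+; zext(x) u> y ? trunc(zext(x) - y) : 0, should also lower to psubusw.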
+define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
+; SSE2-LABEL: test15:
+; SSE2: ## BB#0: ## %vector.ph
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: movdqu (%rsi), %xmm1
+; SSE2-NEXT: movdqu 16(%rsi), %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovbl %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm2, %xmm3
+; SSE2-NEXT: psubusw %xmm3, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test15:
+; SSSE3: ## BB#0: ## %vector.ph
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: movdqu (%rsi), %xmm1
+; SSSE3-NEXT: movdqu 16(%rsi), %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,2,3]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm3, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[3,1,2,3]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,2,3]
+; SSSE3-NEXT: movd %xmm5, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm5
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSSE3-NEXT: movd %xmm1, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movd %xmm1, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovbl %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSSE3-NEXT: pshufb %xmm3, %xmm4
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
+; SSSE3-NEXT: psubusw %xmm4, %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: test15:
+; AVX1: ## BB#0: ## %vector.ph
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vmovups (%rsi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test15:
+; AVX2: ## BB#0: ## %vector.ph
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpminud (%rsi), %ymm1, %ymm1
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+vector.ph:
+  %0 = getelementptr inbounds i16, i16* %head, i64 0
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %2 = load <8 x i16>, <8 x i16>* %1, align 2
+  %3 = getelementptr inbounds i32, i32* %w, i64 0
+  %4 = bitcast i32* %3 to <8 x i32>*
+  %5 = load <8 x i32>, <8 x i32>* %4, align 2
+  %6 = zext <8 x i16> %2 to <8 x i32>
+  %7 = icmp ugt <8 x i32> %6, %5
+  %8 = sub <8 x i32> %6, %5
+  %9 = trunc <8 x i32> %8 to <8 x i16>
+  %10 = select <8 x i1> %7, <8 x i16> %9, <8 x i16> zeroinitializer
+  store <8 x i16> %10, <8 x i16>* %1, align 1
+  ret void
+}
+
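+; As test15, but with the setcc operands swapped (y u< zext(x)); this is the
+; SETULT form the combine canonicalizes to SETUGT before matching.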
+define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
+; SSE2-LABEL: test16:
+; SSE2: ## BB#0: ## %vector.ph
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: movdqu (%rsi), %xmm1
+; SSE2-NEXT: movdqu 16(%rsi), %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; SSE2-NEXT: movd %xmm4, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovael %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSE2-NEXT: cmovbl %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: packssdw %xmm2, %xmm3
+; SSE2-NEXT: psubusw %xmm3, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test16:
+; SSSE3: ## BB#0: ## %vector.ph
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: movdqu (%rsi), %xmm1
+; SSSE3-NEXT: movdqu 16(%rsi), %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,2,3]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm3, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[3,1,2,3]
+; SSSE3-NEXT: movd %xmm4, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,2,3]
+; SSSE3-NEXT: movd %xmm5, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm5
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSSE3-NEXT: movd %xmm1, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovael %eax, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movd %xmm1, %ecx
+; SSSE3-NEXT: cmpl $65535, %ecx ## imm = 0xFFFF
+; SSSE3-NEXT: cmovbl %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSSE3-NEXT: pshufb %xmm3, %xmm4
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
+; SSSE3-NEXT: psubusw %xmm4, %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: test16:
+; AVX1: ## BB#0: ## %vector.ph
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vmovups (%rsi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test16:
+; AVX2: ## BB#0: ## %vector.ph
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpminud (%rsi), %ymm1, %ymm1
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+vector.ph:
+  %0 = getelementptr inbounds i16, i16* %head, i64 0
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %2 = load <8 x i16>, <8 x i16>* %1, align 2
+  %3 = getelementptr inbounds i32, i32* %w, i64 0
+  %4 = bitcast i32* %3 to <8 x i32>*
+  %5 = load <8 x i32>, <8 x i32>* %4, align 2
+  %6 = zext <8 x i16> %2 to <8 x i32>
+  %7 = icmp ult <8 x i32> %5, %6
+  %8 = sub <8 x i32> %6, %5
+  %9 = trunc <8 x i32> %8 to <8 x i16>
+  %10 = select <8 x i1> %7, <8 x i16> %9, <8 x i16> zeroinitializer
+  store <8 x i16> %10, <8 x i16>* %1, align 1
+  ret void
+}