Index: docs/LangRef.rst =================================================================== --- docs/LangRef.rst +++ docs/LangRef.rst @@ -10742,8 +10742,10 @@ Overview: """"""""" -The ``llvm.uabsdiff`` intrinsic returns a vector result of the absolute difference of -the two operands, treating them both as unsigned integers. +The ``llvm.uabsdiff`` intrinsic returns a vector result of the absolute difference +of the two operands, treating them both as unsigned integers. The operation of this +intrinsic will be performed on a larger data type than the given type. +The final result will be truncated to the given type. The ``llvm.sabsdiff`` intrinsic returns a vector result of the absolute difference of the two operands, treating them both as signed integers. @@ -10768,19 +10770,19 @@ is equivalent to:: - %sub = sub <4 x i32> %a, %b - %ispos = icmp ugt <4 x i32> %sub, - %neg = sub <4 x i32> zeroinitializer, %sub - %1 = select <4 x i1> %ispos, <4 x i32> %sub, <4 x i32> %neg + %1 = zext <4 x i32> %a to <4 x i64> + %2 = zext <4 x i32> %b to <4 x i64> + %sub = sub <4 x i64> %1, %2 + %trunc = trunc <4 x i64> %sub to <4 x i32> -Similarly the expression:: +and the expression:: call <4 x i32> @llvm.sabsdiff.v4i32(<4 x i32> %a, <4 x i32> %b) is equivalent to:: %sub = sub nsw <4 x i32> %a, %b - %ispos = icmp sgt <4 x i32> %sub, + %ispos = icmp sge <4 x i32> %sub, zeroinitializer %neg = sub nsw <4 x i32> zeroinitializer, %sub %1 = select <4 x i1> %ispos, <4 x i32> %sub, <4 x i32> %neg Index: lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp =================================================================== --- lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -723,24 +723,36 @@ SDValue VectorLegalizer::ExpandABSDIFF(SDValue Op) { SDLoc dl(Op); - SDValue Tmp1, Tmp2, Tmp3, Tmp4; + SDValue Sub, Neg, Cmp; + SDValue Op0 = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); EVT VT = Op.getValueType(); + + // For unsigned intrinsic, promote the 
type to handle unsigned overflow. + bool isUabsdiff = (Op->getOpcode() == ISD::UABSDIFF); + EVT EltVT = VT.getVectorElementType(); + unsigned NumElt = VT.getVectorNumElements(); + MVT NVT; + if (isUabsdiff) { + NVT = TLI.getTypeToPromoteTo(ISD::UABSDIFF, EltVT.getSimpleVT()); + NVT = MVT::getVectorVT(NVT, NumElt); + Op0 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Op0); + Op1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Op1); + } + SDNodeFlags Flags; - Flags.setNoSignedWrap(Op->getOpcode() == ISD::SABSDIFF); - - Tmp2 = Op.getOperand(0); - Tmp3 = Op.getOperand(1); - Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp3, &Flags); - Tmp2 = - DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Tmp1, &Flags); - Tmp4 = DAG.getNode( - ISD::SETCC, dl, - TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Tmp2, - DAG.getConstant(0, dl, VT), - DAG.getCondCode(Op->getOpcode() == ISD::SABSDIFF ? ISD::SETLT - : ISD::SETULT)); - Tmp1 = DAG.getNode(ISD::VSELECT, dl, VT, Tmp4, Tmp1, Tmp2); - return Tmp1; + Flags.setNoSignedWrap(!isUabsdiff); + VT = isUabsdiff ? EVT(NVT) : VT; + Sub = DAG.getNode(ISD::SUB, dl, VT, Op0, Op1, &Flags); + if (isUabsdiff) + return DAG.getNode(ISD::TRUNCATE, dl, Op.getValueType(), Sub); + + Cmp = + DAG.getNode(ISD::SETCC, dl, TLI.getSetCCResultType(DAG.getDataLayout(), + *DAG.getContext(), VT), + Sub, DAG.getConstant(0, dl, VT), DAG.getCondCode(ISD::SETGE)); + Neg = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Sub, &Flags); + return DAG.getNode(ISD::VSELECT, dl, VT, Cmp, Sub, Neg); } SDValue VectorLegalizer::ExpandSELECT(SDValue Op) { Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -315,6 +315,12 @@ // Promote the i8 variants and force them on up to i32 which has a shorter // encoding. 
+ setOperationAction(ISD::UABSDIFF , MVT::i8 , Promote); + AddPromotedToType (ISD::UABSDIFF , MVT::i8 , MVT::i16); + setOperationAction(ISD::UABSDIFF , MVT::i16 , Promote); + AddPromotedToType (ISD::UABSDIFF , MVT::i16 , MVT::i32); + setOperationAction(ISD::UABSDIFF , MVT::i32 , Promote); + AddPromotedToType (ISD::UABSDIFF , MVT::i32 , MVT::i64); setOperationAction(ISD::CTTZ , MVT::i8 , Promote); AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); Index: test/CodeGen/X86/absdiff_128.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/absdiff_128.ll @@ -0,0 +1,181 @@ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=CHECK + +declare <4 x i8> @llvm.uabsdiff.v4i8(<4 x i8>, <4 x i8>) + +define <4 x i8> @test_uabsdiff_v4i8_expand(<4 x i8> %a1, <4 x i8> %a2) { +; CHECK-LABEL: test_uabsdiff_v4i8_expand +; CHECK: pshufd +; CHECK: movd +; CHECK: subl +; CHECK: punpckldq +; CHECK-DAG: movd %xmm1, [[SRC:%e[a-d]+x]] +; CHECK-DAG: movd %xmm0, [[DST:%e[a-d]+x]] +; CHECK: subl [[SRC]], [[DST]] +; CHECK: movd +; CHECK: pshufd +; CHECK: movd +; CHECK: punpckldq +; CHECK: movdqa +; CHECK: retq + + %1 = call <4 x i8> @llvm.uabsdiff.v4i8(<4 x i8> %a1, <4 x i8> %a2) + ret <4 x i8> %1 +} + +declare <4 x i8> @llvm.sabsdiff.v4i8(<4 x i8>, <4 x i8>) + +define <4 x i8> @test_sabsdiff_v4i8_expand(<4 x i8> %a1, <4 x i8> %a2) { +; CHECK-LABEL: test_sabsdiff_v4i8_expand +; CHECK: psubd +; CHECK: pcmpgtd +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC1:%xmm[0-9]+]] +; CHECK-DAG: pandn {{%xmm[0-9]+}}, [[SRC2:%xmm[0-9]+]] +; CHECK-DAG: pandn [[SRC1]], [[DST:%xmm[0-9]+]] +; CHECK: por [[SRC2]], [[DST]] +; CHECK: retq + + %1 = call <4 x i8> @llvm.sabsdiff.v4i8(<4 x i8> %a1, <4 x i8> %a2) + ret <4 x i8> %1 +} + +declare <8 x i8> @llvm.sabsdiff.v8i8(<8 x i8>, <8 x i8>) + +define <8 x i8> @test_sabsdiff_v8i8_expand(<8 x i8> %a1, <8 x 
i8> %a2) { +; CHECK-LABEL: test_sabsdiff_v8i8_expand +; CHECK: psubw +; CHECK: pcmpgtw +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK-DAG: psubw {{%xmm[0-9]+}}, [[SRC1:%xmm[0-9]+]] +; CHECK-DAG: pandn {{%xmm[0-9]+}}, [[SRC2:%xmm[0-9]+]] +; CHECK-DAG: pandn [[SRC1]], [[DST:%xmm[0-9]+]] +; CHECK: por [[SRC2]], [[DST]] +; CHECK: retq + + %1 = call <8 x i8> @llvm.sabsdiff.v8i8(<8 x i8> %a1, <8 x i8> %a2) + ret <8 x i8> %1 +} + +declare <16 x i8> @llvm.uabsdiff.v16i8(<16 x i8>, <16 x i8>) + +define <16 x i8> @test_uabsdiff_v16i8_expand(<16 x i8> %a1, <16 x i8> %a2) { +; CHECK-LABEL: test_uabsdiff_v16i8_expand +; CHECK: movd +; CHECK-DAG: movzbl {{.*}}, {{%e[a-d]+x}} +; CHECK-DAG: movzbl {{.*}}, {{%e[a-d]+x}} +; CHECK: subl +; CHECK: punpcklbw +; CHECK: retq + + %1 = call <16 x i8> @llvm.uabsdiff.v16i8(<16 x i8> %a1, <16 x i8> %a2) + ret <16 x i8> %1 +} + +declare <8 x i16> @llvm.uabsdiff.v8i16(<8 x i16>, <8 x i16>) + +define <8 x i16> @test_uabsdiff_v8i16_expand(<8 x i16> %a1, <8 x i16> %a2) { +; CHECK-LABEL: test_uabsdiff_v8i16_expand +; CHECK-DAG: pextrw {{.*}}, {{%e[a-d]+x}} +; CHECK-DAG: pextrw {{.*}}, {{%e[a-d]+x}} +; CHECK: subl +; CHECK: punpcklwd +; CHECK: retq + + %1 = call <8 x i16> @llvm.uabsdiff.v8i16(<8 x i16> %a1, <8 x i16> %a2) + ret <8 x i16> %1 +} + +declare <8 x i16> @llvm.sabsdiff.v8i16(<8 x i16>, <8 x i16>) + +define <8 x i16> @test_sabsdiff_v8i16_expand(<8 x i16> %a1, <8 x i16> %a2) { +; CHECK-LABEL: test_sabsdiff_v8i16_expand +; CHECK: psubw +; CHECK: pcmpgtw +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK-DAG: psubw {{%xmm[0-9]+}}, [[SRC1:%xmm[0-9]+]] +; CHECK-DAG: pandn {{%xmm[0-9]+}}, [[SRC2:%xmm[0-9]+]] +; CHECK-DAG: pandn [[SRC1]], [[DST:%xmm[0-9]+]] +; CHECK: por [[SRC2]], [[DST]] +; CHECK: retq + + %1 = call <8 x i16> @llvm.sabsdiff.v8i16(<8 x i16> %a1, <8 x i16> %a2) + ret <8 x i16> %1 +} + +declare <4 x i32> @llvm.sabsdiff.v4i32(<4 x i32>, <4 x i32>) + +define <4 x i32> @test_sabsdiff_v4i32_expand(<4 x i32> %a1, <4 x i32> %a2) { +; CHECK-LABEL: 
test_sabsdiff_v4i32_expand +; CHECK: psubd +; CHECK: pcmpgtd +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC1:%xmm[0-9]+]] +; CHECK-DAG: pandn {{%xmm[0-9]+}}, [[SRC2:%xmm[0-9]+]] +; CHECK-DAG: pandn [[SRC1]], [[DST:%xmm[0-9]+]] +; CHECK: por [[SRC2]], [[DST]] +; CHECK: retq + %1 = call <4 x i32> @llvm.sabsdiff.v4i32(<4 x i32> %a1, <4 x i32> %a2) + ret <4 x i32> %1 +} + +declare <4 x i32> @llvm.uabsdiff.v4i32(<4 x i32>, <4 x i32>) + +define <4 x i32> @test_uabsdiff_v4i32_expand(<4 x i32> %a1, <4 x i32> %a2) { +; CHECK-LABEL: test_uabsdiff_v4i32_expand +; CHECK: pshufd +; CHECK: movd +; CHECK: subl +; CHECK: punpckldq +; CHECK-DAG: movd %xmm1, [[SRC:%e[a-d]+x]] +; CHECK-DAG: movd %xmm0, [[DST:%e[a-d]+x]] +; CHECK: subl [[SRC]], [[DST]] +; CHECK: movd +; CHECK: pshufd +; CHECK: movd +; CHECK: punpckldq +; CHECK: movdqa +; CHECK: retq + + %1 = call <4 x i32> @llvm.uabsdiff.v4i32(<4 x i32> %a1, <4 x i32> %a2) + ret <4 x i32> %1 +} + +declare <2 x i32> @llvm.sabsdiff.v2i32(<2 x i32>, <2 x i32>) + +define <2 x i32> @test_sabsdiff_v2i32_expand(<2 x i32> %a1, <2 x i32> %a2) { +; CHECK-LABEL: test_sabsdiff_v2i32_expand +; CHECK: psubq +; CHECK: pcmpgtd +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK-DAG: psubq {{%xmm[0-9]+}}, [[SRC1:%xmm[0-9]+]] +; CHECK-DAG: pandn {{%xmm[0-9]+}}, [[SRC2:%xmm[0-9]+]] +; CHECK-DAG: pandn [[SRC1]], [[DST:%xmm[0-9]+]] +; CHECK: por [[SRC2]], [[DST]] +; CHECK: retq + + %1 = call <2 x i32> @llvm.sabsdiff.v2i32(<2 x i32> %a1, <2 x i32> %a2) + ret <2 x i32> %1 +} + +declare <2 x i64> @llvm.sabsdiff.v2i64(<2 x i64>, <2 x i64>) + +define <2 x i64> @test_sabsdiff_v2i64_expand(<2 x i64> %a1, <2 x i64> %a2) { +; CHECK-LABEL: test_sabsdiff_v2i64_expand +; CHECK: psubq +; CHECK: pcmpgtd +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK-DAG: psubq {{%xmm[0-9]+}}, [[SRC1:%xmm[0-9]+]] +; CHECK-DAG: pandn {{%xmm[0-9]+}}, [[SRC2:%xmm[0-9]+]] +; CHECK-DAG: pandn [[SRC1]], [[DST:%xmm[0-9]+]] +; CHECK: por [[SRC2]], [[DST]] +; CHECK: retq + + %1 = 
call <2 x i64> @llvm.sabsdiff.v2i64(<2 x i64> %a1, <2 x i64> %a2) + ret <2 x i64> %1 +} Index: test/CodeGen/X86/absdiff_256.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/absdiff_256.ll @@ -0,0 +1,53 @@ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=CHECK + +declare <16 x i32> @llvm.sabsdiff.v16i32(<16 x i32>, <16 x i32>) + +define <16 x i32> @test_sabsdiff_v16i32_expand(<16 x i32> %a1, <16 x i32> %a2) { +; CHECK-LABEL: test_sabsdiff_v16i32_expand +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC:%xmm[0-9]+]] +; CHECK: pxor +; CHECK-DAG: pxor {{%xmm[0-9]+}}, [[DST:%xmm[0-9]+]] +; CHECK: pcmpgtd [[SRC]], [[DST]] +; CHECK: movdqa +; CHECK: pandn +; CHECK: pxor +; CHECK: psubd +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK: pandn +; CHECK: por +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC:%xmm[0-9]+]] +; CHECK-DAG: pxor {{%xmm[0-9]+}}, [[DST:%xmm[0-9]+]] +; CHECK: pcmpgtd [[SRC]], [[DST]] +; CHECK: movdqa +; CHECK: pandn +; CHECK: pxor +; CHECK: psubd +; CHECK: pxor +; CHECK: pandn +; CHECK: por +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC:%xmm[0-9]+]] +; CHECK-DAG: pxor {{%xmm[0-9]+}}, [[DST:%xmm[0-9]+]] +; CHECK: pcmpgtd [[SRC]], [[DST]] +; CHECK: movdqa +; CHECK: pandn +; CHECK: pxor +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC:%xmm[0-9]+]] +; CHECK-DAG: pxor {{%xmm[0-9]+}}, [[DST:%xmm[0-9]+]] +; CHECK: pandn [[SRC]], [[DST]] +; CHECK: por +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC:%xmm[0-9]+]] +; CHECK-DAG: pxor {{%xmm[0-9]+}}, [[DST:%xmm[0-9]+]] +; CHECK: pcmpgtd [[SRC]], [[DST]] +; CHECK: movdqa +; CHECK: pandn +; CHECK-DAG: psubd {{%xmm[0-9]+}}, [[SRC:%xmm[0-9]+]] +; CHECK-DAG: pxor {{%xmm[0-9]+}}, [[DST:%xmm[0-9]+]] +; CHECK: pandn [[SRC]], [[DST]] +; CHECK: por +; CHECK: movdqa +; CHECK: retq + %1 = call <16 x i32> @llvm.sabsdiff.v16i32(<16 x i32> %a1, <16 x i32> %a2) + ret <16 x i32> %1 +} + Index: test/CodeGen/X86/absdiff_expand.ll 
=================================================================== --- test/CodeGen/X86/absdiff_expand.ll +++ /dev/null @@ -1,242 +0,0 @@ -; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK - -declare <4 x i8> @llvm.uabsdiff.v4i8(<4 x i8>, <4 x i8>) - -define <4 x i8> @test_uabsdiff_v4i8_expand(<4 x i8> %a1, <4 x i8> %a2) { -; CHECK-LABEL: test_uabsdiff_v4i8_expand -; CHECK: psubd %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubd %xmm0, %xmm1 -; CHECK-NEXT: movdqa .LCPI{{[0-9_]*}} -; CHECK-NEXT: movdqa %xmm1, %xmm3 -; CHECK-NEXT: pxor %xmm2, %xmm3 -; CHECK-NEXT: pcmpgtd %xmm3, %xmm2 -; CHECK-NEXT: pand %xmm2, %xmm0 -; CHECK-NEXT: pandn %xmm1, %xmm2 -; CHECK-NEXT: por %xmm2, %xmm0 -; CHECK-NEXT: retq - - %1 = call <4 x i8> @llvm.uabsdiff.v4i8(<4 x i8> %a1, <4 x i8> %a2) - ret <4 x i8> %1 -} - -declare <4 x i8> @llvm.sabsdiff.v4i8(<4 x i8>, <4 x i8>) - -define <4 x i8> @test_sabsdiff_v4i8_expand(<4 x i8> %a1, <4 x i8> %a2) { -; CHECK-LABEL: test_sabsdiff_v4i8_expand -; CHECK: psubd %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: pxor %xmm2, %xmm2 -; CHECK-NEXT: psubd %xmm0, %xmm2 -; CHECK-NEXT: pcmpgtd %xmm2, %xmm1 -; CHECK-NEXT: pand %xmm1, %xmm0 -; CHECK-NEXT: pandn %xmm2, %xmm1 -; CHECK-NEXT: por %xmm1, %xmm0 -; CHECK-NEXT: retq - - %1 = call <4 x i8> @llvm.sabsdiff.v4i8(<4 x i8> %a1, <4 x i8> %a2) - ret <4 x i8> %1 -} - - -declare <8 x i8> @llvm.sabsdiff.v8i8(<8 x i8>, <8 x i8>) - -define <8 x i8> @test_sabsdiff_v8i8_expand(<8 x i8> %a1, <8 x i8> %a2) { -; CHECK-LABEL: test_sabsdiff_v8i8_expand -; CHECK: psubw %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: pxor %xmm2, %xmm2 -; CHECK-NEXT: psubw %xmm0, %xmm2 -; CHECK-NEXT: pcmpgtw %xmm2, %xmm1 -; CHECK-NEXT: pand %xmm1, %xmm0 -; CHECK-NEXT: pandn %xmm2, %xmm1 -; CHECK-NEXT: por %xmm1, %xmm0 -; CHECK-NEXT: retq - %1 = call <8 x i8> @llvm.sabsdiff.v8i8(<8 x i8> %a1, <8 x i8> %a2) - ret <8 x i8> %1 -} - -declare <16 x i8> 
@llvm.uabsdiff.v16i8(<16 x i8>, <16 x i8>) - -define <16 x i8> @test_uabsdiff_v16i8_expand(<16 x i8> %a1, <16 x i8> %a2) { -; CHECK-LABEL: test_uabsdiff_v16i8_expand -; CHECK: psubb %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubb %xmm0, %xmm1 -; CHECK-NEXT: movdqa .LCPI{{[0-9_]*}} -; CHECK-NEXT: movdqa %xmm1, %xmm3 -; CHECK-NEXT: pxor %xmm2, %xmm3 -; CHECK-NEXT: pcmpgtb %xmm3, %xmm2 -; CHECK-NEXT: pand %xmm2, %xmm0 -; CHECK-NEXT: pandn %xmm1, %xmm2 -; CHECK-NEXT: por %xmm2, %xmm0 -; CHECK-NEXT: retq - %1 = call <16 x i8> @llvm.uabsdiff.v16i8(<16 x i8> %a1, <16 x i8> %a2) - ret <16 x i8> %1 -} - -declare <8 x i16> @llvm.uabsdiff.v8i16(<8 x i16>, <8 x i16>) - -define <8 x i16> @test_uabsdiff_v8i16_expand(<8 x i16> %a1, <8 x i16> %a2) { -; CHECK-LABEL: test_uabsdiff_v8i16_expand -; CHECK: psubw %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubw %xmm0, %xmm1 -; CHECK-NEXT: movdqa .LCPI{{[0-9_]*}} -; CHECK-NEXT: movdqa %xmm1, %xmm3 -; CHECK-NEXT: pxor %xmm2, %xmm3 -; CHECK-NEXT: pcmpgtw %xmm3, %xmm2 -; CHECK-NEXT: pand %xmm2, %xmm0 -; CHECK-NEXT: pandn %xmm1, %xmm2 -; CHECK-NEXT: por %xmm2, %xmm0 -; CHECK-NEXT: retq - %1 = call <8 x i16> @llvm.uabsdiff.v8i16(<8 x i16> %a1, <8 x i16> %a2) - ret <8 x i16> %1 -} - -declare <8 x i16> @llvm.sabsdiff.v8i16(<8 x i16>, <8 x i16>) - -define <8 x i16> @test_sabsdiff_v8i16_expand(<8 x i16> %a1, <8 x i16> %a2) { -; CHECK-LABEL: test_sabsdiff_v8i16_expand -; CHECK: psubw %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: pxor %xmm2, %xmm2 -; CHECK-NEXT: psubw %xmm0, %xmm2 -; CHECK-NEXT: pcmpgtw %xmm2, %xmm1 -; CHECK-NEXT: pand %xmm1, %xmm0 -; CHECK-NEXT: pandn %xmm2, %xmm1 -; CHECK-NEXT: por %xmm1, %xmm0 -; CHECK-NEXT: retq - %1 = call <8 x i16> @llvm.sabsdiff.v8i16(<8 x i16> %a1, <8 x i16> %a2) - ret <8 x i16> %1 -} - -declare <4 x i32> @llvm.sabsdiff.v4i32(<4 x i32>, <4 x i32>) - -define <4 x i32> @test_sabsdiff_v4i32_expand(<4 x i32> %a1, <4 x i32> %a2) { -; CHECK-LABEL: 
test_sabsdiff_v4i32_expand -; CHECK: psubd %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: pxor %xmm2, %xmm2 -; CHECK-NEXT: psubd %xmm0, %xmm2 -; CHECK-NEXT: pcmpgtd %xmm2, %xmm1 -; CHECK-NEXT: pand %xmm1, %xmm0 -; CHECK-NEXT: pandn %xmm2, %xmm1 -; CHECK-NEXT: por %xmm1, %xmm0 -; CHECK-NEXT: retq - %1 = call <4 x i32> @llvm.sabsdiff.v4i32(<4 x i32> %a1, <4 x i32> %a2) - ret <4 x i32> %1 -} - -declare <4 x i32> @llvm.uabsdiff.v4i32(<4 x i32>, <4 x i32>) - -define <4 x i32> @test_uabsdiff_v4i32_expand(<4 x i32> %a1, <4 x i32> %a2) { -; CHECK-LABEL: test_uabsdiff_v4i32_expand -; CHECK: psubd %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubd %xmm0, %xmm1 -; CHECK-NEXT: movdqa .LCPI{{[0-9_]*}} -; CHECK-NEXT: movdqa %xmm1, %xmm3 -; CHECK-NEXT: pxor %xmm2, %xmm3 -; CHECK-NEXT: pcmpgtd %xmm3, %xmm2 -; CHECK-NEXT: pand %xmm2, %xmm0 -; CHECK-NEXT: pandn %xmm1, %xmm2 -; CHECK-NEXT: por %xmm2, %xmm0 -; CHECK-NEXT: retq - %1 = call <4 x i32> @llvm.uabsdiff.v4i32(<4 x i32> %a1, <4 x i32> %a2) - ret <4 x i32> %1 -} - -declare <2 x i32> @llvm.sabsdiff.v2i32(<2 x i32>, <2 x i32>) - -define <2 x i32> @test_sabsdiff_v2i32_expand(<2 x i32> %a1, <2 x i32> %a2) { -; CHECK-LABEL: test_sabsdiff_v2i32_expand -; CHECK: psubq %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubq %xmm0, %xmm1 -; CHECK-NEXT: movdqa .LCPI{{[0-9_]*}} -; CHECK-NEXT: movdqa %xmm1, %xmm3 -; CHECK-NEXT: pxor %xmm2, %xmm3 -; CHECK-NEXT: movdqa %xmm2, %xmm4 -; CHECK-NEXT: pcmpgtd %xmm3, %xmm4 -; CHECK-NEXT: pshufd $160, %xmm4, %xmm5 # xmm5 = xmm4[0,0,2,2] -; CHECK-NEXT: pcmpeqd %xmm2, %xmm3 -; CHECK-NEXT: pshufd $245, %xmm3, %xmm2 # xmm2 = xmm3[1,1,3,3] -; CHECK-NEXT: pand %xmm5, %xmm2 -; CHECK-NEXT: pshufd $245, %xmm4, %xmm3 # xmm3 = xmm4[1,1,3,3] -; CHECK-NEXT: por %xmm2, %xmm3 -; CHECK-NEXT: pand %xmm3, %xmm0 -; CHECK-NEXT: pandn %xmm1, %xmm3 -; CHECK-NEXT: por %xmm3, %xmm0 -; CHECK-NEXT: retq - %1 = call <2 x i32> @llvm.sabsdiff.v2i32(<2 x i32> %a1, <2 x i32> %a2) - ret 
<2 x i32> %1 -} - -declare <2 x i64> @llvm.sabsdiff.v2i64(<2 x i64>, <2 x i64>) - -define <2 x i64> @test_sabsdiff_v2i64_expand(<2 x i64> %a1, <2 x i64> %a2) { -; CHECK-LABEL: test_sabsdiff_v2i64_expand -; CHECK: psubq %xmm1, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubq %xmm0, %xmm1 -; CHECK-NEXT: movdqa .LCPI{{[0-9_]*}} -; CHECK-NEXT: movdqa %xmm1, %xmm3 -; CHECK-NEXT: pxor %xmm2, %xmm3 -; CHECK-NEXT: movdqa %xmm2, %xmm4 -; CHECK-NEXT: pcmpgtd %xmm3, %xmm4 -; CHECK-NEXT: pshufd $160, %xmm4, %xmm5 # xmm5 = xmm4[0,0,2,2] -; CHECK-NEXT: pcmpeqd %xmm2, %xmm3 -; CHECK-NEXT: pshufd $245, %xmm3, %xmm2 # xmm2 = xmm3[1,1,3,3] -; CHECK-NEXT: pand %xmm5, %xmm2 -; CHECK-NEXT: pshufd $245, %xmm4, %xmm3 # xmm3 = xmm4[1,1,3,3] -; CHECK-NEXT: por %xmm2, %xmm3 -; CHECK-NEXT: pand %xmm3, %xmm0 -; CHECK-NEXT: pandn %xmm1, %xmm3 -; CHECK-NEXT: por %xmm3, %xmm0 -; CHECK-NEXT: retq - %1 = call <2 x i64> @llvm.sabsdiff.v2i64(<2 x i64> %a1, <2 x i64> %a2) - ret <2 x i64> %1 -} - -declare <16 x i32> @llvm.sabsdiff.v16i32(<16 x i32>, <16 x i32>) - -define <16 x i32> @test_sabsdiff_v16i32_expand(<16 x i32> %a1, <16 x i32> %a2) { -; CHECK-LABEL: test_sabsdiff_v16i32_expand -; CHECK: psubd %xmm4, %xmm0 -; CHECK-NEXT: pxor %xmm8, %xmm8 -; CHECK-NEXT: pxor %xmm9, %xmm9 -; CHECK-NEXT: psubd %xmm0, %xmm9 -; CHECK-NEXT: pxor %xmm4, %xmm4 -; CHECK-NEXT: pcmpgtd %xmm9, %xmm4 -; CHECK-NEXT: pand %xmm4, %xmm0 -; CHECK-NEXT: pandn %xmm9, %xmm4 -; CHECK-NEXT: por %xmm4, %xmm0 -; CHECK-NEXT: psubd %xmm5, %xmm1 -; CHECK-NEXT: pxor %xmm4, %xmm4 -; CHECK-NEXT: psubd %xmm1, %xmm4 -; CHECK-NEXT: pxor %xmm5, %xmm5 -; CHECK-NEXT: pcmpgtd %xmm4, %xmm5 -; CHECK-NEXT: pand %xmm5, %xmm1 -; CHECK-NEXT: pandn %xmm4, %xmm5 -; CHECK-NEXT: por %xmm5, %xmm1 -; CHECK-NEXT: psubd %xmm6, %xmm2 -; CHECK-NEXT: pxor %xmm4, %xmm4 -; CHECK-NEXT: psubd %xmm2, %xmm4 -; CHECK-NEXT: pxor %xmm5, %xmm5 -; CHECK-NEXT: pcmpgtd %xmm4, %xmm5 -; CHECK-NEXT: pand %xmm5, %xmm2 -; CHECK-NEXT: pandn %xmm4, %xmm5 -; CHECK-NEXT: 
por %xmm5, %xmm2 -; CHECK-NEXT: psubd %xmm7, %xmm3 -; CHECK-NEXT: pxor %xmm4, %xmm4 -; CHECK-NEXT: psubd %xmm3, %xmm4 -; CHECK-NEXT: pcmpgtd %xmm4, %xmm8 -; CHECK-NEXT: pand %xmm8, %xmm3 -; CHECK-NEXT: pandn %xmm4, %xmm8 -; CHECK-NEXT: por %xmm8, %xmm3 -; CHECK-NEXT: retq - %1 = call <16 x i32> @llvm.sabsdiff.v16i32(<16 x i32> %a1, <16 x i32> %a2) - ret <16 x i32> %1 -} -