Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -16303,6 +16303,10 @@ MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2); if (Op.getOpcode() == ISD::SHL) { + // Simple i8 add case + if (ShiftAmt == 1) + return DAG.getNode(ISD::ADD, dl, VT, R, R); + // Make a large shift. SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R, ShiftAmt, DAG); @@ -16686,36 +16690,111 @@ } } - if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { - // Turn 'a' into a mask suitable for VSELECT: a = a << 5; - Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, dl, VT)); - - SDValue VSelM = DAG.getConstant(0x80, dl, VT); - SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); - OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); - - // r = VSELECT(r, shl(r, 4), a); - SDValue M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(4, dl, VT)); - R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); + if (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget->hasInt256())) { + MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2); + unsigned ShiftOpcode = Op->getOpcode(); + + auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) { + // On SSE41 targets we make use of the fact that VSELECT lowers + // to PBLENDVB which selects bytes based just on the sign bit. + if (Subtarget->hasSSE41()) { + V0 = DAG.getNode(ISD::BITCAST, dl, VT, V0); + V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1); + Sel = DAG.getNode(ISD::BITCAST, dl, VT, Sel); + return DAG.getNode(ISD::BITCAST, dl, SelVT, + DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1)); + } + // On pre-SSE41 targets we test for the sign bit by comparing to + // zero - a negative value will set all bits of the lanes to true + // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering. + SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl); + SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel); + return DAG.getNode(ISD::VSELECT, dl, SelVT, C, V0, V1); + }; - // a += a - Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); - OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); - OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); - - // r = VSELECT(r, shl(r, 2), a); - M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(2, dl, VT)); - R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); + // Turn 'a' into a mask suitable for VSELECT: a = a << 5; + // We can safely do this using i16 shifts as we're only interested in + // the 3 lower bits of each byte. 
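For reference, a minimal scalar sketch (illustrative only, not part of the patch) of what the per-byte sequence below computes for a variable i8 shift-left; the helper name shl_i8_lane and its types are assumptions made for the example:

#include <cstdint>

// Only the low 3 bits of the shift amount matter for an i8 shift, so
// a << 5 moves bit 2 of the amount into the sign bit and each a += a
// exposes the next lower bit; the sign-bit test models the VSELECT
// (PBLENDVB on SSE4.1, PCMPGT-against-zero on earlier targets).
static uint8_t shl_i8_lane(uint8_t r, uint8_t a) {
  a = uint8_t(a << 5);
  if (int8_t(a) < 0) r = uint8_t(r << 4); // amount bit 2: shift by 4
  a = uint8_t(a + a);
  if (int8_t(a) < 0) r = uint8_t(r << 2); // amount bit 1: shift by 2
  a = uint8_t(a + a);
  if (int8_t(a) < 0) r = uint8_t(r << 1); // amount bit 0: shift by 1
  return r;
}

The SRL path follows the same three selection steps with logical right shifts.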
+ Amt = DAG.getNode(ISD::BITCAST, dl, ExtVT, Amt); + Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT)); + Amt = DAG.getNode(ISD::BITCAST, dl, VT, Amt); + + if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) { + // r = VSELECT(r, shift(r, 4), a); + SDValue M = + DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT)); + R = SignBitSelect(VT, Amt, M, R); + + // a += a + Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); + + // r = VSELECT(r, shift(r, 2), a); + M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT)); + R = SignBitSelect(VT, Amt, M, R); + + // a += a + Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); + + // return VSELECT(r, shift(r, 1), a); + M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT)); + R = SignBitSelect(VT, Amt, M, R); + return R; + } - // a += a - Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); - OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); - OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); - - // return VSELECT(r, r+r, a); - R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, - DAG.getNode(ISD::ADD, dl, VT, R, R), R); - return R; + if (Op->getOpcode() == ISD::SRA) { + // For SRA we need to unpack each byte to the higher byte of a i16 vector + // so we can correctly sign extend. We don't care what happens to the + // lower byte. + SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt); + SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt); + SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R); + SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R); + ALo = DAG.getNode(ISD::BITCAST, dl, ExtVT, ALo); + AHi = DAG.getNode(ISD::BITCAST, dl, ExtVT, AHi); + RLo = DAG.getNode(ISD::BITCAST, dl, ExtVT, RLo); + RHi = DAG.getNode(ISD::BITCAST, dl, ExtVT, RHi); + + // r = VSELECT(r, shift(r, 4), a); + SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo, + DAG.getConstant(4, dl, ExtVT)); + SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi, + DAG.getConstant(4, dl, ExtVT)); + RLo = SignBitSelect(ExtVT, ALo, MLo, RLo); + RHi = SignBitSelect(ExtVT, AHi, MHi, RHi); + + // a += a + ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo); + AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi); + + // r = VSELECT(r, shift(r, 2), a); + MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo, + DAG.getConstant(2, dl, ExtVT)); + MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi, + DAG.getConstant(2, dl, ExtVT)); + RLo = SignBitSelect(ExtVT, ALo, MLo, RLo); + RHi = SignBitSelect(ExtVT, AHi, MHi, RHi); + + // a += a + ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo); + AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi); + + // r = VSELECT(r, shift(r, 1), a); + MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo, + DAG.getConstant(1, dl, ExtVT)); + MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi, + DAG.getConstant(1, dl, ExtVT)); + RLo = SignBitSelect(ExtVT, ALo, MLo, RLo); + RHi = SignBitSelect(ExtVT, AHi, MHi, RHi); + + // Logical shift the result back to the lower byte, leaving a zero upper + // byte + // meaning that we can safely pack with PACKUSWB. 
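A scalar sketch (illustrative only, not part of the patch; the helper name ashr_i8_lane is an assumption) of why placing each byte in the high half of an i16 lane gives a correct arithmetic shift, and why the final logical shift right by 8 leaves a zero upper byte for PACKUSWB:

#include <cstdint>

// Sketch assumes the usual arithmetic >> on signed values.
static uint8_t ashr_i8_lane(uint8_t r, unsigned amt) {
  int16_t wide = int16_t(uint16_t(r) << 8); // byte in bits 15..8, sign bit in place
  wide = int16_t(wide >> (amt & 7));        // i16 arithmetic shift
  return uint8_t(uint16_t(wide) >> 8);      // logical shift back; upper byte is zero
}

In the lowering itself the per-lane amount is applied with the same shift-by-4/2/1 sign-bit selection as above, operating on the unpacked amounts, and PACKUSWB can repack the two halves because each i16 value is already in the 0..255 range.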
+ RLo = + DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT)); + RHi = + DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT)); + return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); + } } // It's worth extending once and using the v8i32 shifts for 16-bit types, but @@ -16731,6 +16810,67 @@ DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt)); } + if (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget->hasInt256())) { + unsigned ShiftOpcode = Op->getOpcode(); + + auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) { + // On SSE41 targets we make use of the fact that VSELECT lowers + // to PBLENDVB which selects bytes based just on the sign bit. + if (Subtarget->hasSSE41()) { + MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2); + V0 = DAG.getNode(ISD::BITCAST, dl, ExtVT, V0); + V1 = DAG.getNode(ISD::BITCAST, dl, ExtVT, V1); + Sel = DAG.getNode(ISD::BITCAST, dl, ExtVT, Sel); + return DAG.getNode(ISD::BITCAST, dl, VT, + DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1)); + } + // On pre-SSE41 targets we splat the sign bit - a negative value will + // set all bits of the lanes to true and VSELECT uses that in + // its OR(AND(V0,C),AND(V1,~C)) lowering. + SDValue C = DAG.getNode(ISD::SRA, dl, VT, Sel, + DAG.getConstant(15, dl, VT)); + return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1); + }; + + // Turn 'a' into a mask suitable for VSELECT: a = a << 12; + if (Subtarget->hasSSE41()) { + // On SSE41 targets we need to replicate the shift mask in both + // bytes for PBLENDVB. + Amt = DAG.getNode( + ISD::OR, dl, VT, + DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)), + DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT))); + } else { + Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)); + } + + // r = VSELECT(r, shift(r, 8), a); + SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT)); + R = SignBitSelect(Amt, M, R); + + // a += a + Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); + + // r = VSELECT(r, shift(r, 4), a); + M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT)); + R = SignBitSelect(Amt, M, R); + + // a += a + Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); + + // r = VSELECT(r, shift(r, 2), a); + M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT)); + R = SignBitSelect(Amt, M, R); + + // a += a + Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); + + // return VSELECT(r, shift(r, 1), a); + M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT)); + R = SignBitSelect(Amt, M, R); + return R; + } + // Decompose 256-bit shifts into smaller 128-bit shifts. if (VT.is256BitVector()) { unsigned NumElems = VT.getVectorNumElements(); Index: lib/Target/X86/X86TargetTransformInfo.cpp =================================================================== --- lib/Target/X86/X86TargetTransformInfo.cpp +++ lib/Target/X86/X86TargetTransformInfo.cpp @@ -144,21 +144,21 @@ { ISD::SRA, MVT::v8i32, 1 }, { ISD::SHL, MVT::v2i64, 1 }, { ISD::SRL, MVT::v2i64, 1 }, - { ISD::SHL, MVT::v4i64, 1 }, - { ISD::SRL, MVT::v4i64, 1 }, - - { ISD::SHL, MVT::v32i8, 42 }, // cmpeqb sequence. - { ISD::SHL, MVT::v16i16, 16*10 }, // Scalarized. - - { ISD::SRL, MVT::v32i8, 32*10 }, // Scalarized. - { ISD::SRL, MVT::v16i16, 8*10 }, // Scalarized. - - { ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized. - { ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized. - { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized. - - // Vectorizing division is a bad idea. See the SSE2 table for more comments. 
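For the v8i16/v16i16 path added in X86ISelLowering.cpp above, the mask set-up differs only in where the amount bits start: a << 12 places bit 3 of the 4-bit shift amount in the i16 sign bit, and on SSE4.1 the bits are also copied into the low byte because PBLENDVB selects per byte, not per word. A scalar sketch of that mask (illustrative only; the helper name is an assumption):

#include <cstdint>

static uint16_t i16_shift_mask(uint16_t amt, bool HasSSE41) {
  // Replicate the amount into both bytes for PBLENDVB; otherwise the
  // i16 sign bit alone suffices for the PSRAW $15 splat used pre-SSE4.1.
  return HasSSE41 ? uint16_t((amt << 4) | (amt << 12)) : uint16_t(amt << 12);
}

Each subsequent a += a then exposes the next lower amount bit, driving the 8/4/2/1 selection steps.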
- { ISD::SDIV, MVT::v32i8, 32*20 }, + { ISD::SHL, MVT::v4i64, 1 }, + { ISD::SRL, MVT::v4i64, 1 }, + + { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. + { ISD::SHL, MVT::v16i16, 14 }, // vpblendvb sequence. + + { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. + { ISD::SRL, MVT::v16i16, 14 }, // vpblendvb sequence. + + { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. + { ISD::SRA, MVT::v16i16, 14 }, // vpblendvb sequence. + { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized. + + // Vectorizing division is a bad idea. See the SSE2 table for more comments. + { ISD::SDIV, MVT::v32i8, 32*20 }, { ISD::SDIV, MVT::v16i16, 16*20 }, { ISD::SDIV, MVT::v8i32, 8*20 }, { ISD::SDIV, MVT::v4i64, 4*20 }, @@ -244,25 +244,25 @@ // For some cases, where the shift amount is a scalar we would be able // to generate better code. Unfortunately, when this is the case the value // (the splat) will get hoisted out of the loop, thereby making it invisible - // to ISel. The cost model must return worst case assumptions because it is - // used for vectorization and we don't want to make vectorized code worse - // than scalar code. - { ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence. - { ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized. - { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul. - { ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized. - { ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized. - - { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized. - { ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized. - { ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized. - { ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized. - - { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized. - { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized. - { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized. - { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized. - + // to ISel. The cost model must return worst case assumptions because it is + // used for vectorization and we don't want to make vectorized code worse + // than scalar code. + { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. + { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. + { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul. + { ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized. + { ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized. + + { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence. + { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence. + { ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized. + { ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized. + + { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence. + { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence. + { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized. + { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized. + // It is not a good idea to vectorize division. We have to scalarize it and // in the process we will often end up having to spilling regular // registers. 
The overhead of division is going to dominate most kernels Index: test/Analysis/CostModel/X86/testshiftashr.ll =================================================================== --- test/Analysis/CostModel/X86/testshiftashr.ll +++ test/Analysis/CostModel/X86/testshiftashr.ll @@ -29,9 +29,9 @@ define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) { entry: ; SSE2: shift8i16 - ; SSE2: cost of 80 {{.*}} ashr + ; SSE2: cost of 32 {{.*}} ashr ; SSE2-CODEGEN: shift8i16 - ; SSE2-CODEGEN: sarw %cl + ; SSE2-CODEGEN: psraw %0 = ashr %shifttype8i16 %a , %b ret %shifttype8i16 %0 @@ -41,9 +41,9 @@ define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) { entry: ; SSE2: shift16i16 - ; SSE2: cost of 160 {{.*}} ashr + ; SSE2: cost of 64 {{.*}} ashr ; SSE2-CODEGEN: shift16i16 - ; SSE2-CODEGEN: sarw %cl + ; SSE2-CODEGEN: psraw %0 = ashr %shifttype16i16 %a , %b ret %shifttype16i16 %0 @@ -53,9 +53,9 @@ define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) { entry: ; SSE2: shift32i16 - ; SSE2: cost of 320 {{.*}} ashr + ; SSE2: cost of 128 {{.*}} ashr ; SSE2-CODEGEN: shift32i16 - ; SSE2-CODEGEN: sarw %cl + ; SSE2-CODEGEN: psraw %0 = ashr %shifttype32i16 %a , %b ret %shifttype32i16 %0 @@ -209,9 +209,9 @@ define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) { entry: ; SSE2: shift8i8 - ; SSE2: cost of 80 {{.*}} ashr + ; SSE2: cost of 32 {{.*}} ashr ; SSE2-CODEGEN: shift8i8 - ; SSE2-CODEGEN: sarw %cl + ; SSE2-CODEGEN: psraw %0 = ashr %shifttype8i8 %a , %b ret %shifttype8i8 %0 @@ -221,9 +221,9 @@ define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) { entry: ; SSE2: shift16i8 - ; SSE2: cost of 160 {{.*}} ashr + ; SSE2: cost of 54 {{.*}} ashr ; SSE2-CODEGEN: shift16i8 - ; SSE2-CODEGEN: sarb %cl + ; SSE2-CODEGEN: psraw %0 = ashr %shifttype16i8 %a , %b ret %shifttype16i8 %0 @@ -233,9 +233,9 @@ define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) { entry: ; SSE2: shift32i8 - ; SSE2: cost of 320 {{.*}} ashr + ; SSE2: cost of 108 {{.*}} ashr ; SSE2-CODEGEN: shift32i8 - ; SSE2-CODEGEN: sarb %cl + ; SSE2-CODEGEN: psraw %0 = ashr %shifttype32i8 %a , %b ret %shifttype32i8 %0 Index: test/Analysis/CostModel/X86/testshiftlshr.ll =================================================================== --- test/Analysis/CostModel/X86/testshiftlshr.ll +++ test/Analysis/CostModel/X86/testshiftlshr.ll @@ -29,9 +29,9 @@ define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) { entry: ; SSE2: shift8i16 - ; SSE2: cost of 80 {{.*}} lshr + ; SSE2: cost of 32 {{.*}} lshr ; SSE2-CODEGEN: shift8i16 - ; SSE2-CODEGEN: shrl %cl + ; SSE2-CODEGEN: psrlw %0 = lshr %shifttype8i16 %a , %b ret %shifttype8i16 %0 @@ -41,9 +41,9 @@ define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) { entry: ; SSE2: shift16i16 - ; SSE2: cost of 160 {{.*}} lshr + ; SSE2: cost of 64 {{.*}} lshr ; SSE2-CODEGEN: shift16i16 - ; SSE2-CODEGEN: shrl %cl + ; SSE2-CODEGEN: psrlw %0 = lshr %shifttype16i16 %a , %b ret %shifttype16i16 %0 @@ -53,9 +53,9 @@ define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) { entry: ; SSE2: shift32i16 - ; SSE2: cost of 320 {{.*}} lshr + ; SSE2: cost of 128 {{.*}} lshr ; SSE2-CODEGEN: shift32i16 - ; SSE2-CODEGEN: shrl %cl + ; SSE2-CODEGEN: psrlw %0 = lshr %shifttype32i16 %a , %b ret %shifttype32i16 %0 @@ -209,9 +209,9 @@ define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) { entry: ; SSE2: shift8i8 - ; SSE2: cost of 80 {{.*}} lshr + ; SSE2: cost of 32 {{.*}} lshr ; SSE2-CODEGEN: 
shift8i8 - ; SSE2-CODEGEN: shrl %cl + ; SSE2-CODEGEN: psrlw %0 = lshr %shifttype8i8 %a , %b ret %shifttype8i8 %0 @@ -221,9 +221,9 @@ define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) { entry: ; SSE2: shift16i8 - ; SSE2: cost of 160 {{.*}} lshr + ; SSE2: cost of 26 {{.*}} lshr ; SSE2-CODEGEN: shift16i8 - ; SSE2-CODEGEN: shrb %cl + ; SSE2-CODEGEN: psrlw %0 = lshr %shifttype16i8 %a , %b ret %shifttype16i8 %0 @@ -233,9 +233,9 @@ define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) { entry: ; SSE2: shift32i8 - ; SSE2: cost of 320 {{.*}} lshr + ; SSE2: cost of 52 {{.*}} lshr ; SSE2-CODEGEN: shift32i8 - ; SSE2-CODEGEN: shrb %cl + ; SSE2-CODEGEN: psrlw %0 = lshr %shifttype32i8 %a , %b ret %shifttype32i8 %0 Index: test/Analysis/CostModel/X86/testshiftshl.ll =================================================================== --- test/Analysis/CostModel/X86/testshiftshl.ll +++ test/Analysis/CostModel/X86/testshiftshl.ll @@ -29,9 +29,9 @@ define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) { entry: ; SSE2: shift8i16 - ; SSE2: cost of 80 {{.*}} shl + ; SSE2: cost of 32 {{.*}} shl ; SSE2-CODEGEN: shift8i16 - ; SSE2-CODEGEN: shll %cl + ; SSE2-CODEGEN: psllw %0 = shl %shifttype8i16 %a , %b ret %shifttype8i16 %0 @@ -41,9 +41,9 @@ define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) { entry: ; SSE2: shift16i16 - ; SSE2: cost of 160 {{.*}} shl + ; SSE2: cost of 64 {{.*}} shl ; SSE2-CODEGEN: shift16i16 - ; SSE2-CODEGEN: shll %cl + ; SSE2-CODEGEN: psllw %0 = shl %shifttype16i16 %a , %b ret %shifttype16i16 %0 @@ -53,9 +53,9 @@ define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) { entry: ; SSE2: shift32i16 - ; SSE2: cost of 320 {{.*}} shl + ; SSE2: cost of 128 {{.*}} shl ; SSE2-CODEGEN: shift32i16 - ; SSE2-CODEGEN: shll %cl + ; SSE2-CODEGEN: psllw %0 = shl %shifttype32i16 %a , %b ret %shifttype32i16 %0 @@ -209,9 +209,9 @@ define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) { entry: ; SSE2: shift8i8 - ; SSE2: cost of 80 {{.*}} shl + ; SSE2: cost of 32 {{.*}} shl ; SSE2-CODEGEN: shift8i8 - ; SSE2-CODEGEN: shll + ; SSE2-CODEGEN: psllw %0 = shl %shifttype8i8 %a , %b ret %shifttype8i8 %0 @@ -221,9 +221,9 @@ define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) { entry: ; SSE2: shift16i8 - ; SSE2: cost of 30 {{.*}} shl + ; SSE2: cost of 26 {{.*}} shl ; SSE2-CODEGEN: shift16i8 - ; SSE2-CODEGEN: cmpeqb + ; SSE2-CODEGEN: psllw %0 = shl %shifttype16i8 %a , %b ret %shifttype16i8 %0 @@ -233,9 +233,9 @@ define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) { entry: ; SSE2: shift32i8 - ; SSE2: cost of 60 {{.*}} shl + ; SSE2: cost of 52 {{.*}} shl ; SSE2-CODEGEN: shift32i8 - ; SSE2-CODEGEN: cmpeqb + ; SSE2-CODEGEN: psllw %0 = shl %shifttype32i8 %a , %b ret %shifttype32i8 %0 Index: test/CodeGen/X86/2011-12-15-vec_shift.ll =================================================================== --- test/CodeGen/X86/2011-12-15-vec_shift.ll +++ test/CodeGen/X86/2011-12-15-vec_shift.ll @@ -12,8 +12,8 @@ ; Make sure we're masking and pcmp'ing the VSELECT conditon vector. 
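The updated CHECK lines below reflect the new pre-SSE4.1 sign-bit test: the mask's sign bit is now tested with a signed compare against zero (pxor to materialize the zero, then pcmpgtb) instead of pand with 0x80 followed by pcmpeqb. The two forms are equivalent per byte lane; a sketch (illustrative only, not part of the patch):

#include <cstdint>

static bool sign_bit_set(uint8_t x) {
  return int8_t(x) < 0; // same result as (x & 0x80) == 0x80
}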
; CHECK-WO-SSE4: psllw $5, [[REG1:%xmm.]] - ; CHECK-WO-SSE4: pand [[REG1]], [[REG2:%xmm.]] - ; CHECK-WO-SSE4: pcmpeqb {{%xmm., }}[[REG2]] + ; CHECK-WO-SSE4: pxor [[REG2:%xmm.]], [[REG2:%xmm.]] + ; CHECK-WO-SSE4: pcmpgtb {{%xmm., }}[[REG2]] %1 = shl <16 x i8> %a, %b ret <16 x i8> %1 } Index: test/CodeGen/X86/avx2-vector-shifts.ll =================================================================== --- test/CodeGen/X86/avx2-vector-shifts.ll +++ test/CodeGen/X86/avx2-vector-shifts.ll @@ -1,268 +1,403 @@ -; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s - -; AVX2 Logical Shift Left - -define <16 x i16> @test_sllw_1(<16 x i16> %InVec) { -entry: - %shl = shl <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_sllw_1: -; CHECK-NOT: vpsllw $0, %ymm0, %ymm0 -; CHECK: ret - -define <16 x i16> @test_sllw_2(<16 x i16> %InVec) { -entry: - %shl = shl <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_sllw_2: -; CHECK: vpaddw %ymm0, %ymm0, %ymm0 -; CHECK: ret - -define <16 x i16> @test_sllw_3(<16 x i16> %InVec) { -entry: - %shl = shl <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_sllw_3: -; CHECK: vpsllw $15, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_slld_1(<8 x i32> %InVec) { -entry: - %shl = shl <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_slld_1: -; CHECK-NOT: vpslld $0, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_slld_2(<8 x i32> %InVec) { -entry: - %shl = shl <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_slld_2: -; CHECK: vpaddd %ymm0, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_vpslld_var(i32 %shift) { - %amt = insertelement <8 x i32> undef, i32 %shift, i32 0 - %tmp = shl <8 x i32> , %amt - ret <8 x i32> %tmp -} - -; CHECK-LABEL: test_vpslld_var: -; CHECK: vpslld %xmm0, %ymm1, %ymm0 -; CHECK: ret - -define <8 x i32> @test_slld_3(<8 x i32> %InVec) { -entry: - %shl = shl <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_slld_3: -; CHECK: vpslld $31, %ymm0, %ymm0 -; CHECK: ret - -define <4 x i64> @test_sllq_1(<4 x i64> %InVec) { -entry: - %shl = shl <4 x i64> %InVec, - ret <4 x i64> %shl -} - -; CHECK-LABEL: test_sllq_1: -; CHECK-NOT: vpsllq $0, %ymm0, %ymm0 -; CHECK: ret - -define <4 x i64> @test_sllq_2(<4 x i64> %InVec) { -entry: - %shl = shl <4 x i64> %InVec, - ret <4 x i64> %shl -} - -; CHECK-LABEL: test_sllq_2: -; CHECK: vpaddq %ymm0, %ymm0, %ymm0 -; CHECK: ret - -define <4 x i64> @test_sllq_3(<4 x i64> %InVec) { -entry: - %shl = shl <4 x i64> %InVec, - ret <4 x i64> %shl -} - -; CHECK-LABEL: test_sllq_3: -; CHECK: vpsllq $63, %ymm0, %ymm0 -; CHECK: ret - -; AVX2 Arithmetic Shift - -define <16 x i16> @test_sraw_1(<16 x i16> %InVec) { -entry: - %shl = ashr <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_sraw_1: -; CHECK-NOT: vpsraw $0, %ymm0, %ymm0 -; CHECK: ret - -define <16 x i16> @test_sraw_2(<16 x i16> %InVec) { -entry: - %shl = ashr <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_sraw_2: -; CHECK: vpsraw $1, %ymm0, %ymm0 -; CHECK: ret - -define <16 x i16> @test_sraw_3(<16 x i16> %InVec) { -entry: - %shl = ashr <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_sraw_3: -; CHECK: vpsraw $15, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_srad_1(<8 x i32> %InVec) { -entry: - %shl = ashr <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_srad_1: -; CHECK-NOT: vpsrad $0, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_srad_2(<8 x i32> %InVec) { 
-entry: - %shl = ashr <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_srad_2: -; CHECK: vpsrad $1, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_srad_3(<8 x i32> %InVec) { -entry: - %shl = ashr <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_srad_3: -; CHECK: vpsrad $31, %ymm0, %ymm0 -; CHECK: ret - -; SSE Logical Shift Right - -define <16 x i16> @test_srlw_1(<16 x i16> %InVec) { -entry: - %shl = lshr <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_srlw_1: -; CHECK-NOT: vpsrlw $0, %ymm0, %ymm0 -; CHECK: ret - -define <16 x i16> @test_srlw_2(<16 x i16> %InVec) { -entry: - %shl = lshr <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_srlw_2: -; CHECK: vpsrlw $1, %ymm0, %ymm0 -; CHECK: ret - -define <16 x i16> @test_srlw_3(<16 x i16> %InVec) { -entry: - %shl = lshr <16 x i16> %InVec, - ret <16 x i16> %shl -} - -; CHECK-LABEL: test_srlw_3: -; CHECK: vpsrlw $15, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_srld_1(<8 x i32> %InVec) { -entry: - %shl = lshr <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_srld_1: -; CHECK-NOT: vpsrld $0, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_srld_2(<8 x i32> %InVec) { -entry: - %shl = lshr <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_srld_2: -; CHECK: vpsrld $1, %ymm0, %ymm0 -; CHECK: ret - -define <8 x i32> @test_srld_3(<8 x i32> %InVec) { -entry: - %shl = lshr <8 x i32> %InVec, - ret <8 x i32> %shl -} - -; CHECK-LABEL: test_srld_3: -; CHECK: vpsrld $31, %ymm0, %ymm0 -; CHECK: ret - -define <4 x i64> @test_srlq_1(<4 x i64> %InVec) { -entry: - %shl = lshr <4 x i64> %InVec, - ret <4 x i64> %shl -} - -; CHECK-LABEL: test_srlq_1: -; CHECK-NOT: vpsrlq $0, %ymm0, %ymm0 -; CHECK: ret - -define <4 x i64> @test_srlq_2(<4 x i64> %InVec) { -entry: - %shl = lshr <4 x i64> %InVec, - ret <4 x i64> %shl -} - -; CHECK-LABEL: test_srlq_2: -; CHECK: vpsrlq $1, %ymm0, %ymm0 -; CHECK: ret - -define <4 x i64> @test_srlq_3(<4 x i64> %InVec) { -entry: - %shl = lshr <4 x i64> %InVec, - ret <4 x i64> %shl -} - -; CHECK-LABEL: test_srlq_3: -; CHECK: vpsrlq $63, %ymm0, %ymm0 -; CHECK: ret - -; CHECK-LABEL: @srl_trunc_and_v4i64 -; CHECK: vpand -; CHECK-NEXT: vpsrlvd -; CHECK: ret -define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind { - %and = and <4 x i64> %y, - %trunc = trunc <4 x i64> %and to <4 x i32> - %sra = lshr <4 x i32> %x, %trunc - ret <4 x i32> %sra -} +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s + +; AVX2 Logical Shift Left + +define <16 x i16> @test_sllw_1(<16 x i16> %InVec) { +entry: + %shl = shl <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_sllw_1: +; CHECK-NOT: vpsllw $0, %ymm0, %ymm0 +; CHECK: ret + +define <16 x i16> @test_sllw_2(<16 x i16> %InVec) { +entry: + %shl = shl <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_sllw_2: +; CHECK: vpaddw %ymm0, %ymm0, %ymm0 +; CHECK: ret + +define <16 x i16> @test_sllw_3(<16 x i16> %InVec) { +entry: + %shl = shl <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_sllw_3: +; CHECK: vpsllw $15, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_slld_1(<8 x i32> %InVec) { +entry: + %shl = shl <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_slld_1: +; CHECK-NOT: vpslld $0, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_slld_2(<8 x i32> %InVec) { +entry: + %shl = shl <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_slld_2: +; CHECK: vpaddd 
%ymm0, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_vpslld_var(i32 %shift) { + %amt = insertelement <8 x i32> undef, i32 %shift, i32 0 + %tmp = shl <8 x i32> , %amt + ret <8 x i32> %tmp +} + +; CHECK-LABEL: test_vpslld_var: +; CHECK: vpslld %xmm0, %ymm1, %ymm0 +; CHECK: ret + +define <8 x i32> @test_slld_3(<8 x i32> %InVec) { +entry: + %shl = shl <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_slld_3: +; CHECK: vpslld $31, %ymm0, %ymm0 +; CHECK: ret + +define <4 x i64> @test_sllq_1(<4 x i64> %InVec) { +entry: + %shl = shl <4 x i64> %InVec, + ret <4 x i64> %shl +} + +; CHECK-LABEL: test_sllq_1: +; CHECK-NOT: vpsllq $0, %ymm0, %ymm0 +; CHECK: ret + +define <4 x i64> @test_sllq_2(<4 x i64> %InVec) { +entry: + %shl = shl <4 x i64> %InVec, + ret <4 x i64> %shl +} + +; CHECK-LABEL: test_sllq_2: +; CHECK: vpaddq %ymm0, %ymm0, %ymm0 +; CHECK: ret + +define <4 x i64> @test_sllq_3(<4 x i64> %InVec) { +entry: + %shl = shl <4 x i64> %InVec, + ret <4 x i64> %shl +} + +; CHECK-LABEL: test_sllq_3: +; CHECK: vpsllq $63, %ymm0, %ymm0 +; CHECK: ret + +; AVX2 Arithmetic Shift + +define <16 x i16> @test_sraw_1(<16 x i16> %InVec) { +entry: + %shl = ashr <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_sraw_1: +; CHECK-NOT: vpsraw $0, %ymm0, %ymm0 +; CHECK: ret + +define <16 x i16> @test_sraw_2(<16 x i16> %InVec) { +entry: + %shl = ashr <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_sraw_2: +; CHECK: vpsraw $1, %ymm0, %ymm0 +; CHECK: ret + +define <16 x i16> @test_sraw_3(<16 x i16> %InVec) { +entry: + %shl = ashr <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_sraw_3: +; CHECK: vpsraw $15, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_srad_1(<8 x i32> %InVec) { +entry: + %shl = ashr <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_srad_1: +; CHECK-NOT: vpsrad $0, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_srad_2(<8 x i32> %InVec) { +entry: + %shl = ashr <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_srad_2: +; CHECK: vpsrad $1, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_srad_3(<8 x i32> %InVec) { +entry: + %shl = ashr <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_srad_3: +; CHECK: vpsrad $31, %ymm0, %ymm0 +; CHECK: ret + +; SSE Logical Shift Right + +define <16 x i16> @test_srlw_1(<16 x i16> %InVec) { +entry: + %shl = lshr <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_srlw_1: +; CHECK-NOT: vpsrlw $0, %ymm0, %ymm0 +; CHECK: ret + +define <16 x i16> @test_srlw_2(<16 x i16> %InVec) { +entry: + %shl = lshr <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_srlw_2: +; CHECK: vpsrlw $1, %ymm0, %ymm0 +; CHECK: ret + +define <16 x i16> @test_srlw_3(<16 x i16> %InVec) { +entry: + %shl = lshr <16 x i16> %InVec, + ret <16 x i16> %shl +} + +; CHECK-LABEL: test_srlw_3: +; CHECK: vpsrlw $15, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_srld_1(<8 x i32> %InVec) { +entry: + %shl = lshr <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_srld_1: +; CHECK-NOT: vpsrld $0, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_srld_2(<8 x i32> %InVec) { +entry: + %shl = lshr <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_srld_2: +; CHECK: vpsrld $1, %ymm0, %ymm0 +; CHECK: ret + +define <8 x i32> @test_srld_3(<8 x i32> %InVec) { +entry: + %shl = lshr <8 x i32> %InVec, + ret <8 x i32> %shl +} + +; CHECK-LABEL: test_srld_3: +; CHECK: vpsrld $31, %ymm0, %ymm0 +; CHECK: ret + +define <4 x i64> 
@test_srlq_1(<4 x i64> %InVec) { +entry: + %shl = lshr <4 x i64> %InVec, + ret <4 x i64> %shl +} + +; CHECK-LABEL: test_srlq_1: +; CHECK-NOT: vpsrlq $0, %ymm0, %ymm0 +; CHECK: ret + +define <4 x i64> @test_srlq_2(<4 x i64> %InVec) { +entry: + %shl = lshr <4 x i64> %InVec, + ret <4 x i64> %shl +} + +; CHECK-LABEL: test_srlq_2: +; CHECK: vpsrlq $1, %ymm0, %ymm0 +; CHECK: ret + +define <4 x i64> @test_srlq_3(<4 x i64> %InVec) { +entry: + %shl = lshr <4 x i64> %InVec, + ret <4 x i64> %shl +} + +; CHECK-LABEL: test_srlq_3: +; CHECK: vpsrlq $63, %ymm0, %ymm0 +; CHECK: ret + +; CHECK-LABEL: @srl_trunc_and_v4i64 +; CHECK: vpand +; CHECK-NEXT: vpsrlvd +; CHECK: ret +define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind { + %and = and <4 x i64> %y, + %trunc = trunc <4 x i64> %and to <4 x i32> + %sra = lshr <4 x i32> %x, %trunc + ret <4 x i32> %sra +} + +; +; Vectorized byte shifts +; + +define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind { +; CHECK-LABEL: shl_16i16 +; CHECK: vpsllw $12, %ymm1, %ymm2 +; CHECK-NEXT: vpsllw $4, %ymm1, %ymm1 +; CHECK-NEXT: vpor %ymm2, %ymm1, %ymm1 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm2 +; CHECK-NEXT: vpsllw $8, %ymm0, %ymm3 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsllw $4, %ymm0, %ymm1 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsllw $2, %ymm0, %ymm1 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsllw $1, %ymm0, %ymm1 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: retq + %shl = shl <16 x i16> %r, %a + ret <16 x i16> %shl +} + +define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { +; CHECK-LABEL: shl_32i8 +; CHECK: vpsllw $5, %ymm1, %ymm1 +; CHECK-NEXT: vpsllw $4, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpsllw $2, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: retq + %shl = shl <32 x i8> %r, %a + ret <32 x i8> %shl +} + +define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind { +; CHECK-LABEL: ashr_16i16 +; CHECK: vpsllw $12, %ymm1, %ymm2 +; CHECK-NEXT: vpsllw $4, %ymm1, %ymm1 +; CHECK-NEXT: vpor %ymm2, %ymm1, %ymm1 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm2 +; CHECK-NEXT: vpsraw $8, %ymm0, %ymm3 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $4, %ymm0, %ymm1 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $2, %ymm0, %ymm1 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $1, %ymm0, %ymm1 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: retq + %ashr = ashr <16 x i16> %r, %a + ret <16 x i16> %ashr +} + +define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { +; CHECK-LABEL: ashr_32i8 +; CHECK: vpsllw $5, %ymm1, %ymm1 +; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm2 = 
ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4 +; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 +; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2 +; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0 +; CHECK-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: retq + %ashr = ashr <32 x i8> %r, %a + ret <32 x i8> %ashr +} + +define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind { +; CHECK-LABEL: lshr_16i16 +; CHECK: vpsllw $12, %ymm1, %ymm2 +; CHECK-NEXT: vpsllw $4, %ymm1, %ymm1 +; CHECK-NEXT: vpor %ymm2, %ymm1, %ymm1 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm2 +; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm3 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $4, %ymm0, %ymm1 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $2, %ymm0, %ymm1 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $1, %ymm0, %ymm1 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: retq + %lshr = lshr <16 x i16> %r, %a + ret <16 x i16> %lshr +} + +define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { +; CHECK-LABEL: lshr_32i8 +; CHECK: vpsllw $5, %ymm1, %ymm1 +; CHECK-NEXT: vpsrlw $4, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $2, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $1, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: retq + %lshr = lshr <32 x i8> %r, %a + ret <32 x i8> %lshr +} Index: test/CodeGen/X86/vec_shift8.ll =================================================================== --- test/CodeGen/X86/vec_shift8.ll +++ test/CodeGen/X86/vec_shift8.ll @@ -0,0 +1,529 @@ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s 
--check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX + +; +; Vectorized integer shifts +; + +define <2 x i64> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp { +entry: +; ALL-NOT: shlw +; +; SSE2: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psllw $8, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psllw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psllw $2, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psllw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: por %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psllw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $2, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $1, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX: vpsllw $12, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $8, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $2, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $1, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %shl = shl <8 x i16> %r, %a + %tmp2 = bitcast <8 x i16> %shl to <2 x i64> + ret <2 x i64> %tmp2 +} + +define <2 x i64> @shl_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { +entry: +; ALL-NOT: shlb +; +; SSE2: psllw $5, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psllw $4, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; 
SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psllw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: pandn %xmm0, %xmm1 +; SSE2-NEXT: paddb %xmm0, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllw $5, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psllw $4, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psllw $2, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX: vpsllw $5, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $2, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: retq + %shl = shl <16 x i8> %r, %a + %tmp2 = bitcast <16 x i8> %shl to <2 x i64> + ret <2 x i64> %tmp2 +} + +define <2 x i64> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp { +entry: +; ALL-NOT: sarb +; +; SSE2: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $8, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $2, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psraw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: por %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psraw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psraw $4, %xmm1 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psraw $2, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psraw $1, %xmm1 
+; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX: vpsllw $12, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm2 +; AVX-NEXT: vpsraw $8, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $4, %xmm0, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $2, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $1, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %ashr = ashr <8 x i16> %r, %a + %tmp2 = bitcast <8 x i16> %ashr to <2 x i64> + ret <2 x i64> %tmp2 +} + +define <2 x i64> @ashr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { +entry: +; ALL-NOT: sarb +; +; SSE2: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE2-NEXT: psllw $5, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm6 +; SSE2-NEXT: pandn %xmm2, %xmm6 +; SSE2-NEXT: psraw $4, %xmm2 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: por %xmm6, %xmm2 +; SSE2-NEXT: paddw %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm6 +; SSE2-NEXT: pandn %xmm2, %xmm6 +; SSE2-NEXT: psraw $2, %xmm2 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: por %xmm6, %xmm2 +; SSE2-NEXT: paddw %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm4 +; SSE2-NEXT: pandn %xmm2, %xmm4 +; SSE2-NEXT: psraw $1, %xmm2 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: psrlw $8, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: psraw $4, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: psraw $2, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: pandn %xmm0, %xmm1 +; SSE2-NEXT: psraw $1, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: packuswb %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllw $5, %xmm1 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] +; 
SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psraw $4, %xmm4 +; SSE41-NEXT: pblendvb %xmm4, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psraw $2, %xmm4 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psraw $1, %xmm4 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $4, %xmm2 +; SSE41-NEXT: pblendvb %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $2, %xmm2 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $1, %xmm2 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm2, %xmm1 +; SSE41-NEXT: psrlw $8, %xmm1 +; SSE41-NEXT: packuswb %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX: vpsllw $5, %xmm1, %xmm1 +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 +; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 +; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 +; AVX-NEXT: retq + %ashr = ashr <16 x i8> %r, %a + %tmp2 = bitcast <16 x i8> %ashr to <2 x i64> + ret <2 x i64> %tmp2 +} + +define <2 x i64> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp { +entry: +; ALL-NOT: shrb +; +; SSE2: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand 
%xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: por %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psrlw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlw $4, %xmm1 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlw $2, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlw $1, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX: vpsllw $12, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm2 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $2, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %lshr = lshr <8 x i16> %r, %a + %tmp2 = bitcast <8 x i16> %lshr to <2 x i64> + ret <2 x i64> %tmp2 +} + +define <2 x i64> @lshr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { +entry: +; ALL-NOT: shrb +; +; SSE2: psllw $5, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: pandn %xmm0, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllw $5, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlw $4, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlw $2, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlw $1, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; 
SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX: vpsllw $5, %xmm1, %xmm1 +; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: retq + %lshr = lshr <16 x i8> %r, %a + %tmp2 = bitcast <16 x i8> %lshr to <2 x i64> + ret <2 x i64> %tmp2 +}
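
As a usage note (illustrative only, not part of the patch): loops with per-element variable shifts, such as the one below, are the kind of code the updated cost tables are intended to let the vectorizer handle with the pblendvb/pcmpgtb sequences checked above instead of scalarizing. The function name and the explicit masking are assumptions made for the example:

#include <cstdint>

void shl_bytes(uint8_t *r, const uint8_t *a, int n) {
  for (int i = 0; i != n; ++i)
    r[i] = uint8_t(r[i] << (a[i] & 7)); // per-element variable i8 shift
}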