Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -718,6 +718,41 @@ break; } + case Intrinsic::x86_sse41_pblendvb: + case Intrinsic::x86_sse41_blendvps: + case Intrinsic::x86_sse41_blendvpd: + case Intrinsic::x86_avx_blendv_ps_256: + case Intrinsic::x86_avx_blendv_pd_256: + case Intrinsic::x86_avx2_pblendvb: { + // Convert blendv* to vector selects if the mask is constant. + // This optimization is convoluted because the intrinsic is defined as + // getting a vector of floats or doubles for the ps and pd versions. + // FIXME: That should be changed. + Value *Mask = II->getArgOperand(2); + if (auto C = dyn_cast<ConstantDataVector>(Mask)) { + auto Tyi1 = Builder->getInt1Ty(); + auto SelectorType = cast<VectorType>(Mask->getType()); + auto EltTy = SelectorType->getElementType(); + unsigned Size = SelectorType->getNumElements(); + unsigned BitWidth = EltTy->isFloatTy() ? 32 : (EltTy->isDoubleTy() ? 
64 : EltTy->getIntegerBitWidth()); + assert(BitWidth == 64 || BitWidth == 32 || BitWidth == 8 && "Wrong arguments for variable blend intrinsic"); + SmallVector<Constant *, 32> Selectors; + for (unsigned I = 0; I < Size; ++I) { + // The intrinsics only read the top bit + uint64_t Selector; + if (BitWidth == 8) + Selector = C->getElementAsInteger(I); + else + Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue(); + Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1))); + } + auto NewSelector = ConstantVector::get(Selectors); + return SelectInst::Create(NewSelector, II->getArgOperand(0), II->getArgOperand(1), "blendv"); + } else { + break; + } + } + case Intrinsic::x86_avx_vpermilvar_ps: + case Intrinsic::x86_avx_vpermilvar_ps_256: + case Intrinsic::x86_avx_vpermilvar_pd: Index: llvm/trunk/test/CodeGen/X86/avx-blend.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx-blend.ll +++ llvm/trunk/test/CodeGen/X86/avx-blend.ll @@ -135,3 +135,26 @@ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y ret <2 x double> %min } + +; If we can figure out a blend has a constant mask, we should emit the +; blend instruction with an immediate mask +define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) { +; CHECK-LABEL: constant_blendvpd_avx: +; CHECK-NOT: mov +; CHECK: vblendpd +; CHECK: ret + %1 = select <4 x i1> , <4 x double> %xy, <4 x double> %ab + ret <4 x double> %1 +} + +define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) { +; CHECK-LABEL: constant_blendvps_avx: +; CHECK-NOT: mov +; CHECK: vblendps +; CHECK: ret + %1 = select <8 x i1> , <8 x float> %xyzw, <8 x float> %abcd + ret <8 x float> %1 +} + +declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) +declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>) Index: llvm/trunk/test/CodeGen/X86/avx2-blend.ll 
=================================================================== --- llvm/trunk/test/CodeGen/X86/avx2-blend.ll +++ llvm/trunk/test/CodeGen/X86/avx2-blend.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 | FileCheck %s + +define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) { +; CHECK-LABEL: constant_pblendvb_avx2: +; CHECK: vmovdqa +; CHECK: vpblendvb + %1 = select <32 x i1> , <32 x i8> %xyzw, <32 x i8> %abcd + ret <32 x i8> %1 +} + +declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) Index: llvm/trunk/test/CodeGen/X86/sse41-blend.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse41-blend.ll +++ llvm/trunk/test/CodeGen/X86/sse41-blend.ll @@ -88,3 +88,35 @@ store double %extract214vector_func.i, double addrspace(1)* undef, align 8 ret void } + +; If we can figure out a blend has a constant mask, we should emit the +; blend instruction with an immediate mask +define <2 x double> @constant_blendvpd(<2 x double> %xy, <2 x double> %ab) { +; In this case, we emit a simple movss +; CHECK-LABEL: constant_blendvpd +; CHECK: movsd +; CHECK: ret + %1 = select <2 x i1> , <2 x double> %xy, <2 x double> %ab + ret <2 x double> %1 +} + +define <4 x float> @constant_blendvps(<4 x float> %xyzw, <4 x float> %abcd) { +; CHECK-LABEL: constant_blendvps +; CHECK-NOT: mov +; CHECK: blendps $7 +; CHECK: ret + %1 = select <4 x i1> , <4 x float> %xyzw, <4 x float> %abcd + ret <4 x float> %1 +} + +define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) { +; CHECK-LABEL: constant_pblendvb: +; CHECK: movaps +; CHECK: pblendvb +; CHECK: ret + %1 = select <16 x i1> , <16 x i8> %xyzw, <16 x i8> %abcd + ret <16 x i8> %1 +} +declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) +declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) +declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x 
double>, <2 x double>) Index: llvm/trunk/test/Transforms/InstCombine/blend_x86.ll =================================================================== --- llvm/trunk/test/Transforms/InstCombine/blend_x86.ll +++ llvm/trunk/test/Transforms/InstCombine/blend_x86.ll @@ -0,0 +1,56 @@ +; RUN: opt < %s -instcombine -mtriple=x86_64-apple-macosx -mcpu=core-avx2 -S | FileCheck %s + +define <2 x double> @constant_blendvpd(<2 x double> %xy, <2 x double> %ab) { +; CHECK-LABEL: @constant_blendvpd +; CHECK: select <2 x i1> + %1 = tail call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %xy, <2 x double> %ab, <2 x double> ) + ret <2 x double> %1 +} + +define <4 x float> @constant_blendvps(<4 x float> %xyzw, <4 x float> %abcd) { +; CHECK-LABEL: @constant_blendvps +; CHECK: select <4 x i1> + %1 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %xyzw, <4 x float> %abcd, <4 x float> ) + ret <4 x float> %1 +} + +define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) { +; CHECK-LABEL: @constant_pblendvb +; CHECK: select <16 x i1> + %1 = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd, <16 x i8> ) + ret <16 x i8> %1 +} + +define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) { +; CHECK-LABEL: @constant_blendvpd_avx +; CHECK: select <4 x i1> + %1 = tail call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %xy, <4 x double> %ab, <4 x double> ) + ret <4 x double> %1 +} + +define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) { +; CHECK-LABEL: @constant_blendvps_avx +; CHECK: select <8 x i1> + %1 = tail call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %xyzw, <8 x float> %abcd, <8 x float> ) + ret <8 x float> %1 +} + +define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) { +; CHECK-LABEL: @constant_pblendvb_avx2 +; CHECK: select <32 x i1> + %1 = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %xyzw, <32 x i8> %abcd, + <32 x i8> ) + ret <32 x 
i8> %1 +} + +declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) +declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) +declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) + +declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) +declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) +declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>) +