Index: include/llvm/IR/IntrinsicsX86.td
===================================================================
--- include/llvm/IR/IntrinsicsX86.td
+++ include/llvm/IR/IntrinsicsX86.td
@@ -264,12 +264,6 @@
       Intrinsic<[], [llvm_ptr_ty], []>;
 }
 
-// Misc.
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_sse_movmsk_ps : GCCBuiltin<"__builtin_ia32_movmskps">,
-      Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-}
-
 //===----------------------------------------------------------------------===//
 // SSE2
 
@@ -490,10 +484,6 @@
   def int_x86_sse2_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128">,
       Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
                  llvm_v8i16_ty], [IntrNoMem]>;
-  def int_x86_sse2_movmsk_pd : GCCBuiltin<"__builtin_ia32_movmskpd">,
-      Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
-  def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">,
-      Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
   def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">,
       Intrinsic<[], [llvm_v16i8_ty,
                  llvm_v16i8_ty, llvm_ptr_ty], []>;
@@ -1466,14 +1456,6 @@
                 [IntrNoMem]>;
 }
 
-// Vector extract sign mask
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx_movmsk_pd_256 : GCCBuiltin<"__builtin_ia32_movmskpd256">,
-      Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
-  def int_x86_avx_movmsk_ps_256 : GCCBuiltin<"__builtin_ia32_movmskps256">,
-      Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
-}
-
 // Vector zero
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_avx_vzeroall : GCCBuiltin<"__builtin_ia32_vzeroall">,
@@ -2075,8 +2057,6 @@
 
 // Misc.
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx2_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb256">,
-      Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
   def int_x86_avx2_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb256">,
       Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>;
Index: lib/IR/AutoUpgrade.cpp
===================================================================
--- lib/IR/AutoUpgrade.cpp
+++ lib/IR/AutoUpgrade.cpp
@@ -203,6 +203,12 @@
          Name.startswith("sse41.pmovzx") || // Added in 3.9
          Name.startswith("avx2.pmovsx") || // Added in 3.9
          Name.startswith("avx2.pmovzx") || // Added in 3.9
+         Name.startswith("sse.movmsk.ps") || // Added in 6.0
+         Name.startswith("sse2.movmsk.pd") || // Added in 6.0
+         Name.startswith("sse2.pmovmskb.128") || // Added in 6.0
+         Name.startswith("avx.movmsk.pd.256") || // Added in 6.0
+         Name.startswith("avx.movmsk.ps.256") || // Added in 6.0
+         Name.startswith("avx2.pmovmskb") || // Added in 6.0
          Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
          Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
          Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
@@ -910,6 +916,33 @@
   return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
 }
 
+// Emit the IR equivalent of an x86 movmsk instruction: compare the vector
+// against zero to extract the sign bits, then bitcast the <N x i1> result to
+// an N-bit integer (zero-extended to i32 for vectors narrower than 32).
+static Value *EmitX86Movmsk(IRBuilder<> &Builder, ArrayRef<Value *> Ops) {
+  Type *VecTy = Ops[0]->getType();
+  Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_SLT, Ops[0],
+                                  Constant::getNullValue(VecTy));
+  Value *BitCast = Builder.CreateBitCast(
+      Cmp, Type::getIntNTy(Builder.getContext(),
+                           VecTy->getVectorNumElements()));
+  return (VecTy->getVectorNumElements() < 32)
+             ? Builder.CreateZExt(BitCast,
+                                  Type::getInt32Ty(Builder.getContext()))
+             : BitCast;
+}
+
+// Handle the floating-point movmsk variants (32/64-bit elements) by
+// bitcasting the vector to an integer vector of the same element width first.
+static Value *EmitX86MovmskFloat(IRBuilder<> &Builder, ArrayRef<Value *> Ops) {
+  unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+  Type *EltTy = Ops[0]->getType()->getScalarSizeInBits() == 32
+                    ? Type::getInt32Ty(Builder.getContext())
+                    : Type::getInt64Ty(Builder.getContext());
+  Value *Cast = Builder.CreateBitCast(Ops[0], VectorType::get(EltTy, NumElts));
+  return EmitX86Movmsk(Builder, Cast);
+}
+
 /// Upgrade a call to an old intrinsic. All argument and return casting must be
 /// provided to seamlessly integrate with existing context.
 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
@@ -1278,6 +1311,14 @@
     if (CI->getNumArgOperands() == 3)
       Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
+  } else if (IsX86 && (Name.startswith("avx2.pmovmskb") ||
+                       Name.startswith("sse2.pmovmskb.128"))) {
+    Rep = EmitX86Movmsk(Builder, CI->getArgOperand(0));
+  } else if (IsX86 && (Name.startswith("sse.movmsk.ps") ||
+                       Name.startswith("sse2.movmsk.pd") ||
+                       Name.startswith("avx.movmsk.pd.256") ||
+                       Name.startswith("avx.movmsk.ps.256"))) {
+    Rep = EmitX86MovmskFloat(Builder, CI->getArgOperand(0));
   } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
                        Name == "avx2.vbroadcasti128")) {
     // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
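
For reference, the auto-upgrade above turns an old movmsk intrinsic call into
generic IR. A minimal sketch of the before/after (value names are illustrative
only, not taken from the patch):

  ; before
  %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)

  ; after upgrade
  %cmp = icmp slt <16 x i8> %a0, zeroinitializer
  %bc  = bitcast <16 x i1> %cmp to i16
  %res = zext i16 %bc to i32

The floating-point variants insert one extra bitcast first, e.g. a
<4 x float> operand is reinterpreted as <4 x i32> before the signed compare
against zero.
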
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -30124,6 +30124,18 @@
                        DAG.getBitcast(MVT::v2i64, Res));
   }
 
+  // Combine (bitcast (setcc Vec, AllZerosVec, setlt)) to i32/i64 into
+  // (X86ISD::MOVMSK Vec).
+  if (N0->getOpcode() == ISD::SETCC && !VT.isFloatingPoint() &&
+      !VT.isVector()) {
+    ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
+    SDValue LHS = N0.getOperand(0);
+    SDValue RHS = N0.getOperand(1);
+    if (!LHS.isUndef() && ISD::isBuildVectorAllZeros(RHS.getNode()) &&
+        CC == ISD::CondCode::SETLT)
+      return DAG.getNode(X86ISD::MOVMSK, SDLoc(N0), VT, LHS);
+  }
+
   // Convert a bitcasted integer logic operation that has one bitcasted
   // floating-point operand into a floating-point logic operation. This may
   // create a load of a constant, but that is cheaper than materializing the
@@ -30159,7 +30171,7 @@
     SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
     return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
   }
-  
+
   return SDValue();
 }
 
@@ -35693,6 +35705,26 @@
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  // Combine (i32 (zext (bitcast (setcc (bitcast Vec1), Vec2)))) into
+  // (X86ISD::MOVMSK Vec1), and the i64 form into
+  // (i64 (zext (X86ISD::MOVMSK Vec1))).
+  if ((VT == MVT::i32 || VT == MVT::i64) && N0.getOpcode() == ISD::BITCAST &&
+      N0.getOperand(0).getOpcode() == ISD::SETCC) {
+    SDValue N00 = N0.getOperand(0);
+    if (N00.getOperand(0).getOpcode() == ISD::BITCAST &&
+        N00.getOperand(0).getOperand(0).getValueType().isFloatingPoint()) {
+      SDValue MaskI32 = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
+                                    N00.getOperand(0).getOperand(0));
+      // MOVMSK zeros the upper bits, so widening to i64 must be a zext.
+      return (VT == MVT::i32)
+                 ? MaskI32
+                 : DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, MaskI32);
+    }
+    if (VT == MVT::i32 &&
+        N00.getOperand(0).getValueType().getScalarSizeInBits() == 8)
+      return DAG.getNode(X86ISD::MOVMSK, dl, VT, N00.getOperand(0));
+  }
+
   if (N0.getOpcode() == ISD::AND &&
       N0.hasOneUse() &&
       N0.getOperand(0).hasOneUse()) {
@@ -35740,7 +35772,7 @@
   if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
     return R;
-  
+
   return SDValue();
 }
 
@@ -36718,6 +36750,26 @@
   return SDValue();
 }
 
+// Combine (X86ISD::MOVMSK (sign_extend (setcc Vec, AllZerosVec, setlt))) into
+// (X86ISD::MOVMSK Vec): the setcc result already carries the sign bits.
+static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
+                             const X86Subtarget &Subtarget) {
+  SDLoc DL(N);
+
+  if (N->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) {
+    SDValue N0 = N->getOperand(0);
+    if (N0.getOperand(0).getOpcode() == ISD::SETCC) {
+      SDValue N00 = N0.getOperand(0);
+      SDValue LHS = N00.getOperand(0);
+      SDValue RHS = N00.getOperand(1);
+      ISD::CondCode CC = cast<CondCodeSDNode>(N00.getOperand(2))->get();
+      if (ISD::isBuildVectorAllZeros(RHS.getNode()) && CC == ISD::SETLT)
+        return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, LHS);
+    }
+  }
+  return SDValue();
+}
+
 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
@@ -37023,6 +37075,7 @@
   case X86ISD::TESTM:       return combineTestM(N, DAG, Subtarget);
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
+  case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, Subtarget);
   }
 
   return SDValue();
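
Taken together, the combines above are intended to fold the upgraded IR back
into a single movmsk at instruction selection. A rough sketch of the intended
end-to-end behavior (function name and exact codegen are illustrative, not
test output):

  define i32 @movmsk_ps(<4 x float> %a0) {
    %cast = bitcast <4 x float> %a0 to <4 x i32>
    %cmp  = icmp slt <4 x i32> %cast, zeroinitializer
    %bc   = bitcast <4 x i1> %cmp to i4
    %res  = zext i4 %bc to i32
    ret i32 %res
  }

  ; expected x86-64 codegen, roughly:
  ;   movmskps %xmm0, %eax
  ;   retq
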
Index: lib/Target/X86/X86IntrinsicsInfo.h
===================================================================
--- lib/Target/X86/X86IntrinsicsInfo.h
+++ lib/Target/X86/X86IntrinsicsInfo.h
@@ -385,8 +385,6 @@
   X86_INTRINSIC_DATA(avx_max_ps_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
   X86_INTRINSIC_DATA(avx_min_pd_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
   X86_INTRINSIC_DATA(avx_min_ps_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
-  X86_INTRINSIC_DATA(avx_movmsk_pd_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
-  X86_INTRINSIC_DATA(avx_movmsk_ps_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(avx_rcp_ps_256, INTR_TYPE_1OP, X86ISD::FRCP, 0),
   X86_INTRINSIC_DATA(avx_round_pd_256, ROUNDP, X86ISD::VRNDSCALE, 0),
   X86_INTRINSIC_DATA(avx_round_ps_256, ROUNDP, X86ISD::VRNDSCALE, 0),
@@ -411,7 +409,6 @@
   X86_INTRINSIC_DATA(avx2_phsub_w, INTR_TYPE_2OP, X86ISD::HSUB, 0),
   X86_INTRINSIC_DATA(avx2_pmadd_ub_sw, INTR_TYPE_2OP, X86ISD::VPMADDUBSW, 0),
   X86_INTRINSIC_DATA(avx2_pmadd_wd, INTR_TYPE_2OP, X86ISD::VPMADDWD, 0),
-  X86_INTRINSIC_DATA(avx2_pmovmskb, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(avx2_pmul_dq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
   X86_INTRINSIC_DATA(avx2_pmul_hr_sw, INTR_TYPE_2OP, X86ISD::MULHRS, 0),
   X86_INTRINSIC_DATA(avx2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
@@ -1597,7 +1594,6 @@
   X86_INTRINSIC_DATA(sse_max_ss, INTR_TYPE_2OP, X86ISD::FMAXS, 0),
   X86_INTRINSIC_DATA(sse_min_ps, INTR_TYPE_2OP, X86ISD::FMIN, 0),
   X86_INTRINSIC_DATA(sse_min_ss, INTR_TYPE_2OP, X86ISD::FMINS, 0),
-  X86_INTRINSIC_DATA(sse_movmsk_ps, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse_rcp_ps, INTR_TYPE_1OP, X86ISD::FRCP, 0),
   X86_INTRINSIC_DATA(sse_rsqrt_ps, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
   X86_INTRINSIC_DATA(sse_sqrt_ps, INTR_TYPE_1OP, ISD::FSQRT, 0),
@@ -1623,7 +1619,6 @@
   X86_INTRINSIC_DATA(sse2_max_sd, INTR_TYPE_2OP, X86ISD::FMAXS, 0),
   X86_INTRINSIC_DATA(sse2_min_pd, INTR_TYPE_2OP, X86ISD::FMIN, 0),
   X86_INTRINSIC_DATA(sse2_min_sd, INTR_TYPE_2OP, X86ISD::FMINS, 0),
-  X86_INTRINSIC_DATA(sse2_movmsk_pd, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
@@ -1632,7 +1627,6 @@
   X86_INTRINSIC_DATA(sse2_paddus_b, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
   X86_INTRINSIC_DATA(sse2_paddus_w, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
   X86_INTRINSIC_DATA(sse2_pmadd_wd, INTR_TYPE_2OP, X86ISD::VPMADDWD, 0),
-  X86_INTRINSIC_DATA(sse2_pmovmskb_128, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
   X86_INTRINSIC_DATA(sse2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
   X86_INTRINSIC_DATA(sse2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2319,12 +2319,6 @@
   }
 
   case Intrinsic::x86_mmx_pmovmskb:
-  case Intrinsic::x86_sse_movmsk_ps:
-  case Intrinsic::x86_sse2_movmsk_pd:
-  case Intrinsic::x86_sse2_pmovmskb_128:
-  case Intrinsic::x86_avx_movmsk_pd_256:
-  case Intrinsic::x86_avx_movmsk_ps_256:
-  case Intrinsic::x86_avx2_pmovmskb:
    if (Value *V = simplifyX86movmsk(*II))
      return replaceInstUsesWith(*II, V);
    break;
Index: lib/Transforms/InstCombine/InstCombineCasts.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2041,6 +2041,29 @@
   return nullptr;
 }
 
+/// Fold (bitcast (extractelement (bitcast X), Idx)) back to X, expressed as
+/// an identity shuffle, when the whole chain is a no-op reinterpretation.
+static Instruction *foldBitCastExtEltBitcast(BitCastInst &BitCast,
+                                             InstCombiner::BuilderTy &Builder) {
+  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
+  if (!ExtElt)
+    return nullptr;
+  auto *BitCastElt = dyn_cast<BitCastInst>(ExtElt->getOperand(0));
+  if (!BitCastElt)
+    return nullptr;
+  Value *LHS = BitCastElt->getOperand(0);
+  // The identity shuffle below produces LHS's type, so the fold only applies
+  // when the outer bitcast returns that same vector type.
+  if (!LHS->getType()->isVectorTy() || BitCast.getType() != LHS->getType())
+    return nullptr;
+  Value *RHS = UndefValue::get(LHS->getType());
+  SmallVector<Constant *, 32> MaskValue;
+  for (uint64_t i = 0, e = LHS->getType()->getVectorNumElements(); i != e; ++i)
+    MaskValue.push_back(
+        ConstantInt::get(Type::getInt32Ty(Builder.getContext()), i));
+  return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(MaskValue));
+}
+
 /// Change the type of a select if we can eliminate a bitcast.
 static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                       InstCombiner::BuilderTy &Builder) {
@@ -2343,6 +2366,9 @@
   if (Instruction *I = foldBitCastSelect(CI, Builder))
     return I;
 
+  if (Instruction *I = foldBitCastExtEltBitcast(CI, Builder))
+    return I;
+
   if (SrcTy->isPointerTy())
     return commonPointerCastTransforms(CI);
 
   return commonCastTransforms(CI);
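
A sketch of the shape the new InstCombine fold matches (types chosen purely
for illustration): the chain reinterprets %x without changing any bits, so it
collapses to an identity shuffle of %x:

  %v = bitcast <2 x i32> %x to <1 x i64>
  %e = extractelement <1 x i64> %v, i32 0
  %r = bitcast i64 %e to <2 x i32>

  ; becomes
  %r = shufflevector <2 x i32> %x, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
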
Index: lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -638,12 +638,7 @@
     break;
   }
   case Intrinsic::x86_mmx_pmovmskb:
-  case Intrinsic::x86_sse_movmsk_ps:
-  case Intrinsic::x86_sse2_movmsk_pd:
-  case Intrinsic::x86_sse2_pmovmskb_128:
-  case Intrinsic::x86_avx_movmsk_ps_256:
-  case Intrinsic::x86_avx_movmsk_pd_256:
-  case Intrinsic::x86_avx2_pmovmskb: {
+  {
    // MOVMSK copies the vector elements' sign bits to the low bits
    // and zeros the high bits.
    unsigned ArgWidth;
Index: test/CodeGen/X86/avx-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -1539,13 +1539,25 @@
 define i32 @test_mm256_movemask_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_movemask_ps:
 ; X32: # BB#0:
+; X32-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm1
+; X32-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT: vmovmskps %ymm0, %eax
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: vzeroupper
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_mm256_movemask_ps:
 ; X64: # BB#0:
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm1
+; X64-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT: vmovmskps %ymm0, %eax
+; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: vzeroupper
 ; X64-NEXT: retq
   %res = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
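
Note on the updated checks above: after the upgrade this test no longer calls
a target intrinsic, so the input seen by the backend is roughly:

  %cast = bitcast <8 x float> %a0 to <8 x i32>
  %cmp  = icmp slt <8 x i32> %cast, zeroinitializer
  %bc   = bitcast <8 x i1> %cmp to i8
  %res  = zext i8 %bc to i32

Where the MOVMSK combine does not fire, the compare is lowered literally
(vpcmpgtd on each 128-bit half, recombined with vinsertf128) before the
vmovmskps, and the 8-bit mask is re-zero-extended with movzbl, which is the
codegen recorded here.
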
Index: test/CodeGen/X86/avx2-intrinsics-x86.ll
===================================================================
--- test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -5,15 +5,25 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X64 --check-prefix=X64-AVX512VL
 
 define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_packssdw:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_packssdw:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_packssdw:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_packssdw:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_packssdw:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_packssdw:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -26,43 +36,53 @@
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
-; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
 ; X86-AVX512VL: ## BB#0:
 ; X86-AVX512VL-NEXT: vmovaps LCPI1_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
 ; X64-AVX: ## BB#0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
 ; X64-AVX512VL: ## BB#0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -32768, i32 -65536, i32 -256>)
   ret <16 x i16> %res
 }
 
 define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_packsswb:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_packsswb:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_packsswb:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_packsswb:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_packsswb:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_packsswb:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
 
@@ -75,43 +95,53 @@
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
-; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
 ; X86-AVX512VL: ## BB#0:
 ; X86-AVX512VL-NEXT: vmovaps LCPI3_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
 ; X64-AVX: ## BB#0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
 ; X64-AVX512VL: ## BB#0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
   ret <32 x i8> %res
 }
 
 define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_packuswb:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_packuswb:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_packuswb:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_packuswb:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_packuswb:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_packuswb:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
 
@@ -124,43 +154,53 @@
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
-; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
 ; X86-AVX512VL: ## BB#0:
 ; X86-AVX512VL-NEXT: vmovaps LCPI5_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
 ; X64-AVX: ## BB#0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
 ; X64-AVX512VL: ## BB#0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
   ret <32 x i8> %res
 }
 
 define <32 x i8> @test_x86_avx2_padds_b(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_padds_b:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xec,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_padds_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_padds_b:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xec,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_padds_b:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_padds_b:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xec,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_padds_b:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
 
@@ -168,15 +208,25 @@
 
 define <16 x i16> @test_x86_avx2_padds_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_padds_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xed,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_padds_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_padds_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xed,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_padds_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_padds_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xed,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_padds_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -184,15 +234,25 @@
 
 define <32 x i8> @test_x86_avx2_paddus_b(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_paddus_b:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdc,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_paddus_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_paddus_b:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdc,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_paddus_b:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_paddus_b:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdc,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_paddus_b:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
 
@@ -200,15 +260,25 @@
 
 define <16 x i16> @test_x86_avx2_paddus_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_paddus_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdd,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_paddus_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_paddus_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdd,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_paddus_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_paddus_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdd,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_paddus_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -216,15 +286,25 @@
 
 define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmadd_wd:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmadd_wd:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmadd_wd:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
@@ -232,15 +312,25 @@
 
 define <16 x i16> @test_x86_avx2_pmaxs_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmaxs_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmaxs_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmaxs_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -248,15 +338,25 @@
 
 define <32 x i8> @test_x86_avx2_pmaxu_b(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmaxu_b:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmaxu_b:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmaxu_b:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
 
@@ -264,15 +364,25 @@
 
 define <16 x i16> @test_x86_avx2_pmins_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmins_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmins_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmins_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmins_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmins_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmins_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -280,15 +390,25 @@
 
 define <32 x i8> @test_x86_avx2_pminu_b(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_pminu_b:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pminu_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pminu_b:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pminu_b:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pminu_b:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pminu_b:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
 
@@ -296,11 +416,17 @@
 
 define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) {
-; CHECK-LABEL: test_x86_avx2_pmovmskb:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
-; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-LABEL: test_x86_avx2_pmovmskb:
+; X86: ## BB#0:
+; X86-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
+; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx2_pmovmskb:
+; X64: ## BB#0:
+; X64-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
+; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0) ; <i32> [#uses=1]
   ret i32 %res
 }
 
@@ -308,15 +434,25 @@
 
 define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmulh_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmulh_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmulh_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmulh_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmulh_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmulh_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -324,15 +460,25 @@
 
 define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmulhu_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmulhu_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmulhu_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -340,15 +486,25 @@
 
 define <4 x i64> @test_x86_avx2_pmulu_dq(<8 x i32> %a0, <8 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_pmulu_dq:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf4,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmulu_dq:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pmulu_dq:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf4,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pmulu_dq:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pmulu_dq:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf4,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pmulu_dq:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
 
@@ -356,15 +512,25 @@
 
 define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_psad_bw:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psad_bw:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psad_bw:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psad_bw:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psad_bw:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psad_bw:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
 
@@ -372,15 +538,25 @@
 
 define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psll_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psll_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psll_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psll_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psll_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psll_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
@@ -388,15 +564,25 @@
 
 define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) {
-; AVX2-LABEL: test_x86_avx2_psll_q:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psll_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psll_q:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psll_q:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psll_q:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psll_q:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
 
@@ -404,15 +590,25 @@
 
 define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_psll_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psll_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psll_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psll_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psll_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psll_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -420,15 +616,25 @@
 
 define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) {
-; AVX2-LABEL: test_x86_avx2_pslli_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pslli_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pslli_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pslli_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pslli_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pslli_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
@@ -436,15 +642,25 @@
 
 define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) {
-; AVX2-LABEL: test_x86_avx2_pslli_q:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pslli_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pslli_q:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pslli_q:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pslli_q:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pslli_q:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
 
@@ -452,15 +668,25 @@
 
 define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) {
-; AVX2-LABEL: test_x86_avx2_pslli_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pslli_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_pslli_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_pslli_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_pslli_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_pslli_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -468,15 +694,25 @@
 
 define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psra_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psra_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psra_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psra_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psra_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psra_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
@@ -484,15 +720,25 @@
 
 define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_psra_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psra_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psra_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psra_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psra_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psra_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -500,15 +746,25 @@
 
 define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) {
-; AVX2-LABEL: test_x86_avx2_psrai_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrai_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrai_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrai_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrai_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrai_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
@@ -516,15 +772,25 @@
 
 define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) {
-; AVX2-LABEL: test_x86_avx2_psrai_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrai_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrai_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrai_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrai_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrai_w:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
@@ -532,15 +798,25 @@
 
 define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrl_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrl_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrl_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrl_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrl_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrl_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
@@ -548,15 +824,25 @@
 
 define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrl_q:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrl_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrl_q:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrl_q:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrl_q:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrl_q:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
 
@@ -564,15 +850,25 @@
 
 define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrl_w:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrl_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrl_w:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrl_w:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrl_w:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrl_w:
+;
X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -580,15 +876,25 @@ define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) { -; AVX2-LABEL: test_x86_avx2_psrli_d: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psrli_d: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psrli_d: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psrli_d: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psrli_d: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psrli_d: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -596,15 +902,25 @@ define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) { -; AVX2-LABEL: test_x86_avx2_psrli_q: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psrli_q: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psrli_q: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psrli_q: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psrli_q: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psrli_q: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1] ret <4 x i64> %res } @@ -612,15 +928,25 @@ define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) { -; AVX2-LABEL: test_x86_avx2_psrli_w: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psrli_w: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX 
Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psrli_w: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psrli_w: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psrli_w: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psrli_w: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -628,15 +954,25 @@ define <32 x i8> @test_x86_avx2_psubs_b(<32 x i8> %a0, <32 x i8> %a1) { -; AVX2-LABEL: test_x86_avx2_psubs_b: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe8,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psubs_b: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psubs_b: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe8,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psubs_b: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psubs_b: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe8,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psubs_b: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -644,15 +980,25 @@ define <16 x i16> @test_x86_avx2_psubs_w(<16 x i16> %a0, <16 x i16> %a1) { -; AVX2-LABEL: test_x86_avx2_psubs_w: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe9,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psubs_w: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psubs_w: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe9,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psubs_w: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psubs_w: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## encoding: 
[0xc5,0xfd,0xe9,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psubs_w: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -660,15 +1006,25 @@ define <32 x i8> @test_x86_avx2_psubus_b(<32 x i8> %a0, <32 x i8> %a1) { -; AVX2-LABEL: test_x86_avx2_psubus_b: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd8,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psubus_b: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psubus_b: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd8,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psubus_b: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psubus_b: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd8,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psubus_b: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -676,25 +1032,40 @@ define <16 x i16> @test_x86_avx2_psubus_w(<16 x i16> %a0, <16 x i16> %a1) { -; AVX2-LABEL: test_x86_avx2_psubus_w: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd9,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psubus_w: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psubus_w: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd9,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psubus_w: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psubus_w: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd9,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psubus_w: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) { -; CHECK-LABEL: test_x86_avx2_phadd_d: 
-; CHECK: ## BB#0: -; CHECK-NEXT: vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_phadd_d: +; X86: ## BB#0: +; X86-NEXT: vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_phadd_d: +; X64: ## BB#0: +; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -702,10 +1073,15 @@ define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) { -; CHECK-LABEL: test_x86_avx2_phadd_sw: -; CHECK: ## BB#0: -; CHECK-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_phadd_sw: +; X86: ## BB#0: +; X86-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_phadd_sw: +; X64: ## BB#0: +; X64-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -713,10 +1089,15 @@ define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) { -; CHECK-LABEL: test_x86_avx2_phadd_w: -; CHECK: ## BB#0: -; CHECK-NEXT: vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_phadd_w: +; X86: ## BB#0: +; X86-NEXT: vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_phadd_w: +; X64: ## BB#0: +; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -724,10 +1105,15 @@ define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) { -; CHECK-LABEL: test_x86_avx2_phsub_d: -; CHECK: ## BB#0: -; CHECK-NEXT: vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_phsub_d: +; X86: ## BB#0: +; X86-NEXT: vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_phsub_d: +; X64: ## BB#0: +; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -735,10 +1121,15 @@ define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) { -; CHECK-LABEL: test_x86_avx2_phsub_sw: -; CHECK: ## BB#0: -; CHECK-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_phsub_sw: +; X86: ## BB#0: +; X86-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_phsub_sw: +; X64: ## BB#0: +; X64-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = 
call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -746,10 +1137,15 @@ define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) { -; CHECK-LABEL: test_x86_avx2_phsub_w: -; CHECK: ## BB#0: -; CHECK-NEXT: vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_phsub_w: +; X86: ## BB#0: +; X86-NEXT: vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_phsub_w: +; X64: ## BB#0: +; X64-NEXT: vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -757,15 +1153,25 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) { -; AVX2-LABEL: test_x86_avx2_pmadd_ub_sw: -; AVX2: ## BB#0: -; AVX2-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pmadd_ub_sw: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pmadd_ub_sw: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -778,41 +1184,51 @@ ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX-NEXT: vmovdqa (%eax), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x08] ; X86-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0] -; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-NEXT: retl ## encoding: [0xc3] ; ; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0: ; X86-AVX512VL: ## BB#0: ; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX512VL-NEXT: vmovdqa (%eax), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x08] ; X86-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0] -; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] ; ; X64-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0: ; X64-AVX: ## BB#0: ; X64-AVX-NEXT: vmovdqa (%rdi), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x0f] ; X64-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0] -; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; 
X64-AVX-NEXT: retq ## encoding: [0xc3] ; ; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0: ; X64-AVX512VL: ## BB#0: ; X64-AVX512VL-NEXT: vmovdqa (%rdi), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x0f] ; X64-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0] -; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %a0 = load <32 x i8>, <32 x i8>* %ptr %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) { -; AVX2-LABEL: test_x86_avx2_pmul_hr_sw: -; AVX2: ## BB#0: -; AVX2-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pmul_hr_sw: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pmul_hr_sw: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -820,15 +1236,25 @@ define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) { -; AVX2-LABEL: test_x86_avx2_pshuf_b: -; AVX2: ## BB#0: -; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pshuf_b: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pshuf_b: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pshuf_b: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pshuf_b: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pshuf_b: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32
x i8>> [#uses=1] ret <32 x i8> %res } @@ -836,10 +1262,15 @@ define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) { -; CHECK-LABEL: test_x86_avx2_psign_b: -; CHECK: ## BB#0: -; CHECK-NEXT: vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_psign_b: +; X86: ## BB#0: +; X86-NEXT: vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_psign_b: +; X64: ## BB#0: +; X64-NEXT: vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -847,10 +1278,15 @@ define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) { -; CHECK-LABEL: test_x86_avx2_psign_d: -; CHECK: ## BB#0: -; CHECK-NEXT: vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_psign_d: +; X86: ## BB#0: +; X86-NEXT: vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_psign_d: +; X64: ## BB#0: +; X64-NEXT: vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -858,10 +1294,15 @@ define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) { -; CHECK-LABEL: test_x86_avx2_psign_w: -; CHECK: ## BB#0: -; CHECK-NEXT: vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_psign_w: +; X86: ## BB#0: +; X86-NEXT: vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_psign_w: +; X64: ## BB#0: +; X64-NEXT: vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -869,10 +1310,15 @@ define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) { -; CHECK-LABEL: test_x86_avx2_mpsadbw: -; CHECK: ## BB#0: -; CHECK-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_mpsadbw: +; X86: ## BB#0: +; X86-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_mpsadbw: +; X64: ## BB#0: +; X64-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -880,15 +1326,25 @@ define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_packusdw: -; AVX2: ## BB#0: -; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_packusdw: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding:
[0xc4,0xe2,0x7d,0x2b,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_packusdw: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_packusdw: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_packusdw: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_packusdw: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -901,38 +1357,43 @@ ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI55_0, kind: FK_Data_4 -; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-NEXT: retl ## encoding: [0xc3] ; ; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold: ; X86-AVX512VL: ## BB#0: ; X86-AVX512VL-NEXT: vmovaps LCPI55_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI55_0, kind: FK_Data_4 -; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] ; ; X64-AVX-LABEL: test_x86_avx2_packusdw_fold: ; X64-AVX: ## BB#0: ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI55_0-4, kind: reloc_riprel_4byte -; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-AVX-NEXT: retq ## encoding: [0xc3] ; ; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold: ; X64-AVX512VL: ## BB#0: ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI55_0-4, kind: reloc_riprel_4byte -; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> ) ret <16 x i16> %res } define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) { -; CHECK-LABEL: test_x86_avx2_pblendvb: -; CHECK: ## BB#0: -; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_pblendvb: +; X86: ## BB#0: +; X86-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_pblendvb: +; X64: ## BB#0: +; X64-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> 
%a2) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -940,11 +1401,17 @@ define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) { -; CHECK-LABEL: test_x86_avx2_pblendw: -; CHECK: ## BB#0: -; CHECK-NEXT: vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07] -; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_pblendw: +; X86: ## BB#0: +; X86-NEXT: vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07] +; X86-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_pblendw: +; X64: ## BB#0: +; X64-NEXT: vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07] +; X64-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i8 7) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -952,15 +1419,25 @@ define <32 x i8> @test_x86_avx2_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) { -; AVX2-LABEL: test_x86_avx2_pmaxsb: -; AVX2: ## BB#0: -; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pmaxsb: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pmaxsb: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pmaxsb: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pmaxsb: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pmaxsb: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -968,15 +1445,25 @@ define <8 x i32> @test_x86_avx2_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_pmaxsd: -; AVX2: ## BB#0: -; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pmaxsd: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pmaxsd: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pmaxsd: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pmaxsd: +; 
X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pmaxsd: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -984,15 +1471,25 @@ define <8 x i32> @test_x86_avx2_pmaxud(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_pmaxud: -; AVX2: ## BB#0: -; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pmaxud: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pmaxud: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pmaxud: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pmaxud: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pmaxud: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1000,15 +1497,25 @@ define <16 x i16> @test_x86_avx2_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) { -; AVX2-LABEL: test_x86_avx2_pmaxuw: -; AVX2: ## BB#0: -; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pmaxuw: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pmaxuw: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pmaxuw: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pmaxuw: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pmaxuw: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -1016,15 +1523,25 @@ define <32 x i8> @test_x86_avx2_pminsb(<32 x i8> %a0, <32 x i8> %a1) { -; 
AVX2-LABEL: test_x86_avx2_pminsb: -; AVX2: ## BB#0: -; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pminsb: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pminsb: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pminsb: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pminsb: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pminsb: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -1032,15 +1549,25 @@ define <8 x i32> @test_x86_avx2_pminsd(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_pminsd: -; AVX2: ## BB#0: -; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pminsd: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pminsd: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pminsd: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pminsd: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pminsd: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1048,15 +1575,25 @@ define <8 x i32> @test_x86_avx2_pminud(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_pminud: -; AVX2: ## BB#0: -; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pminud: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pminud: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; 
X86-AVX512VL-LABEL: test_x86_avx2_pminud: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pminud: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pminud: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1064,15 +1601,25 @@ define <16 x i16> @test_x86_avx2_pminuw(<16 x i16> %a0, <16 x i16> %a1) { -; AVX2-LABEL: test_x86_avx2_pminuw: -; AVX2: ## BB#0: -; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_pminuw: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_pminuw: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_pminuw: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_pminuw: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_pminuw: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1] ret <16 x i16> %res } @@ -1087,11 +1634,17 @@ define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) { -; CHECK-LABEL: test_x86_avx2_pblendd_128: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08] -; CHECK-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-LABEL: test_x86_avx2_pblendd_128: +; X86: ## BB#0: +; X86-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08] +; X86-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_pblendd_128: +; X64: ## BB#0: +; X64-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08] +; X64-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i8 7) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -1099,11 +1652,17 @@ define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) { -; CHECK-LABEL: test_x86_avx2_pblendd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07] -; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7] -; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] 
+; X86-LABEL: test_x86_avx2_pblendd_256: +; X86: ## BB#0: +; X86-NEXT: vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07] +; X86-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7] +; X86-NEXT: retl ## encoding: [0xc3] +; +; X64-LABEL: test_x86_avx2_pblendd_256: +; X64: ## BB#0: +; X64-NEXT: vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07] +; X64-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i8 7) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1114,15 +1673,25 @@ ; and its lowering. Indeed, the offsets are the first source in ; the instruction. define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_permd: -; AVX2: ## BB#0: -; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_permd: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_permd: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_permd: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_permd: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_permd: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1133,15 +1702,25 @@ ; and its lowering. Indeed, the offsets are the first source in ; the instruction. 
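; As an illustrative reading of the checks that follow (a sketch inferred from
; the encoding bytes in this test, not an additional FileCheck pattern): the
; intrinsic receives the data in %a0 (ymm0) and the offsets in %a1 (ymm1),
; yet in the encoding [0xc4,0xe2,0x75,0x16,0xc0] the VEX.vvvv first-source
; field holds ymm1, i.e. the offsets. In Intel operand order the instruction
; therefore reads
;   vpermps ymm0, ymm1, ymm0   ; dst, offsets, data
; which the AT&T syntax in the checks prints as vpermps %ymm0, %ymm1, %ymm0.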
define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_permps: -; AVX2: ## BB#0: -; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_permps: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_permps: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_permps: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_permps: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_permps: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -1153,12 +1732,12 @@ ; X86: ## BB#0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x00] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskload_q: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x07] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -1170,12 +1749,12 @@ ; X86: ## BB#0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x00] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskload_q_256: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x07] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1] ret <4 x i64> %res } @@ -1187,12 +1766,12 @@ ; X86: ## BB#0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x00] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskload_d: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x07] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -1204,12 +1783,12 @@ ; X86: ## BB#0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x00] -; 
X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskload_d_256: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x07] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1221,12 +1800,12 @@ ; X86: ## BB#0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x08] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskstore_q: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x0f] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) ret void } @@ -1239,13 +1818,13 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08] ; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskstore_q_256: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x0f] ; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] call void @llvm.x86.avx2.maskstore.q.256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) ret void } @@ -1257,12 +1836,12 @@ ; X86: ## BB#0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x8e,0x08] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskstore_d: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0x79,0x8e,0x0f] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) ret void } @@ -1275,13 +1854,13 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] ; X86-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08] ; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_x86_avx2_maskstore_d_256: ; X64: ## BB#0: ; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x0f] ; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X64-NEXT: retq ## encoding: [0xc3] call void @llvm.x86.avx2.maskstore.d.256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) ret void } @@ -1289,15 +1868,25 @@ define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_psllv_d: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psllv_d: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: 
test_x86_avx2_psllv_d: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psllv_d: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psllv_d: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -1305,15 +1894,25 @@ define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) { -; AVX2-LABEL: test_x86_avx2_psllv_d_256: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psllv_d_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psllv_d_256: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_256: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psllv_d_256: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1] +; X64-AVX-NEXT: retq ## encoding: [0xc3] +; +; X64-AVX512VL-LABEL: test_x86_avx2_psllv_d_256: +; X64-AVX512VL: ## BB#0: +; X64-AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1] +; X64-AVX512VL-NEXT: retq ## encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -1321,15 +1920,25 @@ define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) { -; AVX2-LABEL: test_x86_avx2_psllv_q: -; AVX2: ## BB#0: -; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1] -; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3] -; -; AVX512VL-LABEL: test_x86_avx2_psllv_q: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1] -; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3] +; X86-AVX-LABEL: test_x86_avx2_psllv_q: +; X86-AVX: ## BB#0: +; X86-AVX-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1] +; X86-AVX-NEXT: retl ## encoding: [0xc3] +; +; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q: +; X86-AVX512VL: ## BB#0: +; X86-AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1] +; X86-AVX512VL-NEXT: retl ## encoding: [0xc3] +; +; X64-AVX-LABEL: test_x86_avx2_psllv_q: +; X64-AVX: ## BB#0: +; X64-AVX-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1] 
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1337,15 +1946,25 @@
 define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
-; AVX2-LABEL: test_x86_avx2_psllv_q_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psllv_q_256:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psllv_q_256:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -1353,15 +1972,25 @@
 define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrlv_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrlv_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrlv_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1369,15 +1998,25 @@
 define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrlv_d_256:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_d_256:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -1385,15 +2024,25 @@
 define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrlv_q:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrlv_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrlv_q:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_q:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -1401,15 +2050,25 @@
 define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrlv_q_256:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_q_256:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -1417,15 +2076,25 @@
 define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrav_d:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrav_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrav_d:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrav_d:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1438,7 +2107,7 @@
 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI88_0, kind: FK_Data_4
 ; X86-AVX-NEXT: vpsravd LCPI88_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
 ; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI88_1, kind: FK_Data_4
-; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
 ; X86-AVX512VL: ## BB#0:
@@ -1447,7 +2116,7 @@
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI88_0, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: vpsravd LCPI88_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI88_1, kind: FK_Data_4
-; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
 ; X64-AVX: ## BB#0:
@@ -1456,7 +2125,7 @@
 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI88_0-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
 ; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI88_1-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
 ; X64-AVX512VL: ## BB#0:
@@ -1465,22 +2134,32 @@
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI88_0-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI88_1-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> , <4 x i32> )
   ret <4 x i32> %res
 }
 declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
 define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) {
-; AVX2-LABEL: test_x86_avx2_psrav_d_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
-; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
-; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_avx2_psrav_d_256:
+; X86-AVX: ## BB#0:
+; X86-AVX-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
+; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrav_d_256:
+; X64-AVX: ## BB#0:
+; X64-AVX-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -1493,7 +2172,7 @@
 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
 ; X86-AVX-NEXT: vpsravd LCPI90_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
 ; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI90_1, kind: FK_Data_4
-; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
 ; X86-AVX512VL: ## BB#0:
@@ -1502,7 +2181,7 @@
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: vpsravd LCPI90_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
 ; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI90_1, kind: FK_Data_4
-; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
 ; X64-AVX: ## BB#0:
@@ -1511,7 +2190,7 @@
 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI90_0-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
 ; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI90_1-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
 ; X64-AVX512VL: ## BB#0:
@@ -1520,7 +2199,7 @@
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
 ; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI90_1-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> , <8 x i32> )
   ret <8 x i32> %res
 }
@@ -1531,12 +2210,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_pd:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0, i8* %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
   ret <2 x double> %res
@@ -1549,12 +2228,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_pd_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0, i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 2) ;
   ret <4 x double> %res
@@ -1567,12 +2246,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_pd:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0, i8* %a1, <2 x i64> %idx, <2 x double> %mask, i8 2) ;
   ret <2 x double> %res
@@ -1585,12 +2264,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_pd_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0, i8* %a1, <4 x i64> %idx, <4 x double> %mask, i8 2) ;
   ret <4 x double> %res
@@ -1603,12 +2282,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_ps:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0, i8* %a1, <4 x i32> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
@@ -1621,12 +2300,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_ps_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %a1, <8 x i32> %idx, <8 x float> %mask, i8 2) ;
   ret <8 x float> %res
@@ -1639,12 +2318,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_ps:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0, i8* %a1, <2 x i64> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
@@ -1658,13 +2337,13 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
 ; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_ps_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x4f]
 ; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
@@ -1677,12 +2356,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_q:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0, i8* %a1, <4 x i32> %idx, <2 x i64> %mask, i8 2) ;
   ret <2 x i64> %res
@@ -1695,12 +2374,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_q_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0, i8* %a1, <4 x i32> %idx, <4 x i64> %mask, i8 2) ;
   ret <4 x i64> %res
@@ -1713,12 +2392,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_q:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i64> %mask, i8 2) ;
   ret <2 x i64> %res
@@ -1731,12 +2410,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_q_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0, i8* %a1, <4 x i64> %idx, <4 x i64> %mask, i8 2) ;
   ret <4 x i64> %res
@@ -1749,12 +2428,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_d:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0, i8* %a1, <4 x i32> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
@@ -1767,12 +2446,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_d_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0, i8* %a1, <8 x i32> %idx, <8 x i32> %mask, i8 2) ;
   ret <8 x i32> %res
@@ -1785,12 +2464,12 @@
 ; X86: ## BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x48]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_d:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x4f]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0, i8* %a1, <2 x i64> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
@@ -1804,13 +2483,13 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
 ; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_d_256:
 ; X64: ## BB#0:
 ; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x4f]
 ; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
@@ -1828,7 +2507,7 @@
 ; X86-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
 ; X86-AVX-NEXT: vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
 ; X86-AVX-NEXT: vmovups %ymm2, (%eax) ## encoding: [0xc5,0xfc,0x11,0x10]
-; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_gather_mask:
 ; X86-AVX512VL: ## BB#0:
@@ -1837,21 +2516,21 @@
 ; X86-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
 ; X86-AVX512VL-NEXT: vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
 ; X86-AVX512VL-NEXT: vmovups %ymm2, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x10]
-; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_gather_mask:
 ; X64-AVX: ## BB#0:
 ; X64-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
 ; X64-AVX-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
 ; X64-AVX-NEXT: vmovups %ymm2, (%rsi) ## encoding: [0xc5,0xfc,0x11,0x16]
-; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_gather_mask:
 ; X64-AVX512VL: ## BB#0:
 ; X64-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
 ; X64-AVX512VL-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
 ; X64-AVX512VL-NEXT: vmovups %ymm2, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x16]
-; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
   %a_i8 = bitcast float* %a to i8*
   %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %a_i8, <8 x i32> %idx, <8 x float> %mask, i8 4) ;
Index: test/CodeGen/X86/broadcastm-lowering.ll
===================================================================
--- test/CodeGen/X86/broadcastm-lowering.ll
+++ test/CodeGen/X86/broadcastm-lowering.ll
@@ -43,29 +43,21 @@
 define <4 x i32> @test_mm_epi32(<16 x i8> %a, <16 x i8> %b) {
 ; AVX512CD-LABEL: test_mm_epi32:
 ; AVX512CD: # BB#0: # %entry
-; AVX512CD-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX512CD-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512CD-NEXT: vpslld $31, %zmm0, %zmm0
-; AVX512CD-NEXT: vptestmd %zmm0, %zmm0, %k0
-; AVX512CD-NEXT: kmovw %k0, %eax
-; AVX512CD-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX512CD-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT: vzeroupper
+; AVX512CD-NEXT: vpmovmskb %xmm0, %eax
+; AVX512CD-NEXT: vmovd %eax, %xmm0
+; AVX512CD-NEXT: vpbroadcastd %xmm0, %xmm0
 ; AVX512CD-NEXT: retq
 ;
 ; AVX512VLCDBW-LABEL: test_mm_epi32:
 ; AVX512VLCDBW: # BB#0: # %entry
-; AVX512VLCDBW-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
-; AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %xmm0
+; AVX512VLCDBW-NEXT: vpmovmskb %xmm0, %eax
+; AVX512VLCDBW-NEXT: vpbroadcastd %eax, %xmm0
 ; AVX512VLCDBW-NEXT: retq
 ;
 ; X86-AVX512VLCDBW-LABEL: test_mm_epi32:
 ; X86-AVX512VLCDBW: # BB#0: # %entry
-; X86-AVX512VLCDBW-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
-; X86-AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %xmm0
+; X86-AVX512VLCDBW-NEXT: vpmovmskb %xmm0, %eax
+; X86-AVX512VLCDBW-NEXT: vpbroadcastd %eax, %xmm0
 ; X86-AVX512VLCDBW-NEXT: retl
 entry:
   %0 = icmp eq <16 x i8> %a, %b
Index: test/CodeGen/X86/movmsk.ll
===================================================================
--- test/CodeGen/X86/movmsk.ll
+++ test/CodeGen/X86/movmsk.ll
@@ -133,7 +133,7 @@
 define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: t2:
 ; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: movmskpd %xmm0, %eax
+; CHECK-NEXT: movmskps %xmm0, %eax
 ; CHECK-NEXT: movl (%rdi,%rax,4), %eax
 ; CHECK-NEXT: retq
 entry:
Index: test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -1826,11 +1826,13 @@
 ; X32-LABEL: test_mm_movemask_epi8:
 ; X32: # BB#0:
 ; X32-NEXT: pmovmskb %xmm0, %eax
+; X32-NEXT: movzwl %ax, %eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_mm_movemask_epi8:
 ; X64: # BB#0:
 ; X64-NEXT: pmovmskb %xmm0, %eax
+; X64-NEXT: movzwl %ax, %eax
 ; X64-NEXT: retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %arg0)
Index: test/Transforms/InstCombine/X86/x86-movmsk.ll
===================================================================
--- test/Transforms/InstCombine/X86/x86-movmsk.ll
+++ test/Transforms/InstCombine/X86/x86-movmsk.ll
@@ -9,7 +9,7 @@
 define i32 @test_upper_x86_mmx_pmovmskb(x86_mmx %a0) {
 ; CHECK-LABEL: @test_upper_x86_mmx_pmovmskb(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx [[A0:%.*]])
 ; CHECK-NEXT: ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
@@ -19,8 +19,11 @@
 define i32 @test_upper_x86_sse_movmsk_ps(<4 x float> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse_movmsk_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A0:%.*]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; CHECK-NEXT: [[TMP4:%.*]] = zext i4 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
   %2 = and i32 %1, 15
@@ -29,8 +32,11 @@
 define i32 @test_upper_x86_sse2_movmsk_pd(<2 x double> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse2_movmsk_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[A0:%.*]] to <2 x i64>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i64> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i1> [[TMP2]] to i2
+; CHECK-NEXT: [[TMP4:%.*]] = zext i2 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
   %2 = and i32 %1, 3
@@ -39,8 +45,10 @@
 define i32 @test_upper_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse2_pmovmskb_128(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <16 x i8> [[A0:%.*]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i1> [[TMP1]] to i16
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i32
+; CHECK-NEXT: ret i32 [[TMP3]]
 ;
   %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
   %2 = and i32 %1, 65535
@@ -49,8 +57,11 @@
 define i32 @test_upper_x86_avx_movmsk_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: @test_upper_x86_avx_movmsk_ps_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x float> [[A0:%.*]] to <8 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i1> [[TMP2]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
   %2 = and i32 %1, 255
@@ -59,8 +70,11 @@
 define i32 @test_upper_x86_avx_movmsk_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: @test_upper_x86_avx_movmsk_pd_256(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x double> [[A0:%.*]] to <4 x i64>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i64> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; CHECK-NEXT: [[TMP4:%.*]] = zext i4 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
   %2 = and i32 %1, 15
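Reviewer note (illustrative only, not part of the diff): the InstCombine checks above spell out the generic IR that the auto-upgrader now emits in place of the old movmsk/pmovmskb intrinsic calls: compare the source vector against zero with icmp slt (after a bitcast to integers for the floating-point variants), bitcast the <N x i1> result to an iN mask, and zero-extend to i32. A minimal standalone sketch of the two shapes, using hypothetical function names:

define i32 @sketch_movmsk_ps(<4 x float> %v) {
  ; Reinterpret the float lanes as integers so the sign bits can be tested.
  %cast = bitcast <4 x float> %v to <4 x i32>
  ; One i1 per lane: true iff that lane's sign bit is set.
  %sign = icmp slt <4 x i32> %cast, zeroinitializer
  ; Pack the lane predicates into a 4-bit mask, then widen to the i32 result.
  %bits = bitcast <4 x i1> %sign to i4
  %mask = zext i4 %bits to i32
  ret i32 %mask
}

define i32 @sketch_pmovmskb_128(<16 x i8> %v) {
  %sign = icmp slt <16 x i8> %v, zeroinitializer
  %bits = bitcast <16 x i1> %sign to i16
  ; This explicit i16 -> i32 zext is what surfaces as the extra movzwl in the
  ; sse2-intrinsics-fast-isel checks above.
  %mask = zext i16 %bits to i32
  ret i32 %mask
}

As the movmsk.ll and broadcastm-lowering.ll checks show, the backend folds this compare/bitcast/zext shape back into a single mask-extraction instruction (movmskps in t2, vpmovmskb in test_mm_epi32).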