Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -6378,6 +6378,7 @@
 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
                                         const SDLoc &DL, SelectionDAG &DAG,
+                                        const X86Subtarget &Subtarget,
                                         bool isAfterLegalize) {
   unsigned NumElems = Elts.size();
 
@@ -6482,6 +6483,12 @@
   if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
     return SDValue();
 
+  // Don't create 256-bit non-temporal aligned loads without AVX2 as these
+  // will lower to regular temporal loads and use the cache.
+  if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
+      VT.is256BitVector() && !Subtarget.hasInt256())
+    return SDValue();
+
   if (IsConsecutiveLoad)
     return CreateLoad(VT, LDBase);
 
@@ -7686,7 +7693,8 @@
   // See if we can use a vector load to get all of the elements.
   if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) {
     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
-    if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, false))
+    if (SDValue LD =
+            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
       return LD;
   }
 
@@ -28808,7 +28816,8 @@
   }
 
   if (Elts.size() == VT.getVectorNumElements())
-    if (SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true))
+    if (SDValue LD =
+            EltsFromConsecutiveLoads(VT, Elts, dl, DAG, Subtarget, true))
       return LD;
 
   // For AVX2, we sometimes want to combine
@@ -32291,15 +32300,17 @@
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   // For chips with slow 32-byte unaligned loads, break the 32-byte operation
-  // into two 16-byte operations.
+  // into two 16-byte operations. Also split non-temporal aligned loads on AVX1
+  // targets as 32-byte loads will lower to regular temporal loads.
   ISD::LoadExtType Ext = Ld->getExtensionType();
   bool Fast;
   unsigned AddressSpace = Ld->getAddressSpace();
   unsigned Alignment = Ld->getAlignment();
   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
       Ext == ISD::NON_EXTLOAD &&
-      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
-                             AddressSpace, Alignment, &Fast) && !Fast) {
+      ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
+       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
+                               AddressSpace, Alignment, &Fast) && !Fast))) {
     unsigned NumElems = RegVT.getVectorNumElements();
     if (NumElems < 2)
       return SDValue();
@@ -35008,7 +35019,8 @@
     if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), OpVT,
                                 AS, Alignment, &Fast) && Fast) {
       SDValue Ops[] = {SubVec2, SubVec};
-      if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
+      if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG,
+                                                Subtarget, false))
         return Ld;
     }
   }
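[Reviewer note: the two new conditions above are easier to read side by side. Below is a minimal standalone C++ sketch of just their subtarget/alignment legs; the function and parameter names are illustrative only and are not LLVM API. The real checks above additionally test the vector type, the load extension kind and the legalization phase.]

#include <cassert>

// Mirrors the new early-out in EltsFromConsecutiveLoads: don't merge
// consecutive non-temporal loads into one 256-bit load when the result
// could not be lowered as a 32-byte VMOVNTDQA (only AVX2 has one) and
// would silently become a regular temporal load.
static bool rejectWideNTMerge(bool NonTemporal, unsigned Alignment,
                              bool Is256BitVector, bool HasAVX2) {
  return NonTemporal && Alignment >= 32 && Is256BitVector && !HasAVX2;
}

// Mirrors the extended condition in the 256-bit load combine: split the
// load in two either when 32-byte unaligned accesses are slow, or when it
// is non-temporal and AVX1 can only honour that as two 16-byte VMOVNTDQAs.
static bool splitWideLoad(bool NonTemporal, unsigned Alignment, bool HasAVX2,
                          bool SlowUnaligned32ByteAccess) {
  return (NonTemporal && !HasAVX2 && Alignment >= 16) ||
         SlowUnaligned32ByteAccess;
}

int main() {
  // A 32-byte-aligned NT load of a 256-bit vector on AVX1: the merge is
  // refused, and an existing wide NT load is split into two halves.
  assert(rejectWideNTMerge(true, 32, true, /*HasAVX2=*/false));
  assert(splitWideLoad(true, 32, /*HasAVX2=*/false, false));
  // With AVX2 the single 32-byte VMOVNTDQA is kept.
  assert(!rejectWideNTMerge(true, 32, true, /*HasAVX2=*/true));
  return 0;
}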
Index: lib/Target/X86/X86InstrFragmentsSIMD.td
===================================================================
--- lib/Target/X86/X86InstrFragmentsSIMD.td
+++ lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -641,22 +641,37 @@
 // SSE pattern fragments
 //===----------------------------------------------------------------------===//
 
+// Vector load wrappers to prevent folding of non-temporal aligned loads on
+// supporting targets.
+def vec128load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return !Subtarget->hasSSE41() || !cast<LoadSDNode>(N)->isNonTemporal() ||
+         cast<LoadSDNode>(N)->getAlignment() < 16;
+}]>;
+def vec256load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return !Subtarget->hasAVX2() || !cast<LoadSDNode>(N)->isNonTemporal() ||
+         cast<LoadSDNode>(N)->getAlignment() < 32;
+}]>;
+def vec512load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return !Subtarget->hasAVX512() || !cast<LoadSDNode>(N)->isNonTemporal() ||
+         cast<LoadSDNode>(N)->getAlignment() < 64;
+}]>;
+
 // 128-bit load pattern fragments
 // NOTE: all 128-bit integer vector loads are promoted to v2i64
-def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
-def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
-def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
+def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (vec128load node:$ptr))>;
+def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (vec128load node:$ptr))>;
+def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (vec128load node:$ptr))>;
 
 // 256-bit load pattern fragments
 // NOTE: all 256-bit integer vector loads are promoted to v4i64
-def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
-def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
-def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
+def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (vec256load node:$ptr))>;
+def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (vec256load node:$ptr))>;
+def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (vec256load node:$ptr))>;
 
 // 512-bit load pattern fragments
-def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
-def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
-def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;
+def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (vec512load node:$ptr))>;
+def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (vec512load node:$ptr))>;
+def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (vec512load node:$ptr))>;
 
 // 128-/256-/512-bit extload pattern fragments
 def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
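[Reviewer note: the three fragments above share one idea: a (load ...) may still be folded into an instruction's memory operand unless the target could instead honour it as a non-temporal MOVNTDQA of that width. A minimal standalone C++ sketch of that predicate follows, with illustrative names that are not the TableGen or LLVM API.]

#include <cassert>

// True when folding the load as a regular memory operand is acceptable,
// i.e. when keeping it separate would not preserve any NT behaviour anyway:
// the target has no NT load of this width, the load is not marked
// non-temporal, or it is under-aligned for MOVNTDQA.
static bool mayFoldVectorLoad(unsigned WidthBytes, bool HasNTLoadOfWidth,
                              bool NonTemporal, unsigned Alignment) {
  return !HasNTLoadOfWidth || !NonTemporal || Alignment < WidthBytes;
}

int main() {
  // SSE4.1, 16-byte-aligned NT load: keep it separate, emit MOVNTDQA.
  assert(!mayFoldVectorLoad(16, /*HasNTLoadOfWidth=*/true, true, 16));
  // Pre-SSE4.1 there is no MOVNTDQA at all, so folding loses nothing.
  assert(mayFoldVectorLoad(16, /*HasNTLoadOfWidth=*/false, true, 16));
  // Under-aligned NT loads cannot use MOVNTDQA either; folding is fine.
  assert(mayFoldVectorLoad(32, /*HasNTLoadOfWidth=*/true, true, 16));
  return 0;
}

[The effect is visible throughout the test updates below: folded forms such as "vpaddd (%rdi), %ymm0, %ymm0" become a separate vmovntdqa followed by the register-to-register arithmetic op.]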
Index: test/CodeGen/X86/fast-isel-nontemporal.ll
===================================================================
--- test/CodeGen/X86/fast-isel-nontemporal.ll
+++ test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -957,8 +957,16 @@
 ;
 ; AVX1-LABEL: test_load_nt16xfloat:
 ; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt16xfloat:
@@ -1003,8 +1011,16 @@
 ;
 ; AVX1-LABEL: test_load_nt8xdouble:
 ; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovapd (%rdi), %ymm0
-; AVX1-NEXT: vmovapd 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt8xdouble:
@@ -1049,8 +1065,16 @@
 ;
 ; AVX1-LABEL: test_load_nt64xi8:
 ; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt64xi8:
@@ -1101,8 +1125,16 @@
 ;
 ; AVX1-LABEL: test_load_nt32xi16:
 ; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt32xi16:
@@ -1153,8 +1185,16 @@
 ;
 ; AVX1-LABEL: test_load_nt16xi32:
 ; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt16xi32:
@@ -1199,8 +1239,16 @@
 ;
 ; AVX1-LABEL: test_load_nt8xi64:
 ; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_load_nt8xi64:
Index: test/CodeGen/X86/nontemporal-loads.ll
===================================================================
--- test/CodeGen/X86/nontemporal-loads.ll
+++ test/CodeGen/X86/nontemporal-loads.ll
@@ -168,7 +168,9 @@
 ;
 ; AVX1-LABEL: test_v8f32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v8f32:
@@ -199,7 +201,9 @@
 ;
 ; AVX1-LABEL: test_v8i32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v8i32:
@@ -240,7 +244,9 @@
 ;
 ; AVX1-LABEL: test_v4f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v4f64:
@@ -271,7 +277,9 @@
 ;
 ; AVX1-LABEL: test_v4i64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v4i64:
@@ -302,7 +310,9 @@
 ;
 ; AVX1-LABEL: test_v16i16:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v16i16:
@@ -333,7 +343,9 @@
 ;
 ; AVX1-LABEL: test_v32i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v32i8:
@@ -370,8 +382,12 @@
 ;
 ; AVX1-LABEL: test_v16f32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v16f32:
@@ -407,8 +423,12 @@
 ;
 ; AVX1-LABEL: test_v16i32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v16i32:
@@ -444,8 +464,12 @@
 ;
 ; AVX1-LABEL: test_v8f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v8f64:
@@ -481,8 +505,12 @@
 ;
 ; AVX1-LABEL: test_v8i64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v8i64:
@@ -518,8 +546,12 @@
 ;
 ; AVX1-LABEL: test_v32i16:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v32i16:
@@ -567,8 +599,12 @@
 ;
 ; AVX1-LABEL: test_v64i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_v64i8:
@@ -608,12 +644,14 @@
 ;
 ; AVX-LABEL: test_arg_v4f32:
 ; AVX: # BB#0:
-; AVX-NEXT: vaddps (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v4f32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vaddps (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
   %2 = fadd <4 x float> %arg, %1
@@ -628,12 +666,14 @@
 ;
 ; AVX-LABEL: test_arg_v4i32:
 ; AVX: # BB#0:
-; AVX-NEXT: vpaddd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v4i32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddd (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
   %2 = add <4 x i32> %arg, %1
@@ -648,12 +688,14 @@
 ;
 ; AVX-LABEL: test_arg_v2f64:
 ; AVX: # BB#0:
-; AVX-NEXT: vaddpd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v2f64:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vaddpd (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
   %2 = fadd <2 x double> %arg, %1
@@ -668,12 +710,14 @@
 ;
 ; AVX-LABEL: test_arg_v2i64:
 ; AVX: # BB#0:
-; AVX-NEXT: vpaddq (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v2i64:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddq (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
   %2 = add <2 x i64> %arg, %1
@@ -688,12 +732,14 @@
 ;
 ; AVX-LABEL: test_arg_v8i16:
 ; AVX: # BB#0:
-; AVX-NEXT: vpaddw (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v8i16:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddw (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
   %2 = add <8 x i16> %arg, %1
@@ -708,12 +754,14 @@
 ;
 ; AVX-LABEL: test_arg_v16i8:
 ; AVX: # BB#0:
-; AVX-NEXT: vpaddb (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v16i8:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddb (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
   %2 = add <16 x i8> %arg, %1
@@ -729,14 +777,24 @@
 ; SSE-NEXT: addps 16(%rdi), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: test_arg_v8f32:
-; AVX: # BB#0:
-; AVX-NEXT: vaddps (%rdi), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: test_arg_v8f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v8f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v8f32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vaddps (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
   %2 = fadd <8 x float> %arg, %1
@@ -752,23 +810,36 @@
 ;
 ; AVX1-LABEL: test_arg_v8i32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v8i32:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddd (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
-; AVX512-LABEL: test_arg_v8i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpaddd (%rdi), %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_arg_v8i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512F-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_arg_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512BW-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_arg_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpaddd (%rdi), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
   %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
   %2 = add <8 x i32> %arg, %1
   ret <8 x i32> %2
@@ -781,14 +852,24 @@
 ; SSE-NEXT: addpd 16(%rdi), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: test_arg_v4f64:
-; AVX: # BB#0:
-; AVX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: test_arg_v4f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v4f64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v4f64:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vaddpd (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512-NEXT: vaddpd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
   %2 = fadd <4 x double> %arg, %1
@@ -804,22 +885,24 @@
 ;
 ; AVX1-LABEL: test_arg_v4i64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v4i64:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddq (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v4i64:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddq (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
   %2 = add <4 x i64> %arg, %1
@@ -835,22 +918,24 @@
 ;
 ; AVX1-LABEL: test_arg_v16i16:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v16i16:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddw (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v16i16:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddw (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
   %2 = add <16 x i16> %arg, %1
@@ -866,22 +951,24 @@
 ;
 ; AVX1-LABEL: test_arg_v32i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v32i8:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddb (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v32i8:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddb (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
+; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT: retq
   %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
   %2 = add <32 x i8> %arg, %1
@@ -899,15 +986,30 @@
 ; SSE-NEXT: addps 48(%rdi), %xmm3
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: test_arg_v16f32:
-; AVX: # BB#0:
-; AVX-NEXT: vaddps (%rdi), %ymm0, %ymm0
-; AVX-NEXT: vaddps 32(%rdi), %ymm1, %ymm1
-; AVX-NEXT: retq
+; AVX1-LABEL: test_arg_v16f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT: vaddps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v16f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX2-NEXT: vaddps %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v16f32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vaddps (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
+; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: retq
   %1 = load <16 x float>, <16 x float>* %src, align 64, !nontemporal !1
   %2 = fadd <16 x float> %arg, %1
@@ -925,29 +1027,32 @@
 ;
 ; AVX1-LABEL: test_arg_v16i32:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm2
-; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpaddd %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v16i32:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddd (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpaddd 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v16i32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddd (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: retq
   %1 = load <16 x i32>, <16 x i32>* %src, align 64, !nontemporal !1
   %2 = add <16 x i32> %arg, %1
@@ -963,15 +1068,30 @@
 ; SSE-NEXT: addpd 48(%rdi), %xmm3
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: test_arg_v8f64:
-; AVX: # BB#0:
-; AVX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
-; AVX-NEXT: vaddpd 32(%rdi), %ymm1, %ymm1
-; AVX-NEXT: retq
+; AVX1-LABEL: test_arg_v8f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT: vaddpd %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v8f64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX2-NEXT: vaddpd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v8f64:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vaddpd (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
+; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: retq
   %1 = load <8 x double>, <8 x double>* %src, align 64, !nontemporal !1
   %2 = fadd <8 x double> %arg, %1
@@ -989,29 +1109,32 @@
 ;
 ; AVX1-LABEL: test_arg_v8i64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm2
-; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpaddq %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v8i64:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddq (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpaddq 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: test_arg_v8i64:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpaddq (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: retq
   %1 = load <8 x i64>, <8 x i64>* %src, align 64, !nontemporal !1
   %2 = add <8 x i64> %arg, %1
@@ -1029,41 +1152,48 @@
 ;
 ; AVX1-LABEL: test_arg_v32i16:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm2
-; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vpaddw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpaddw %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpaddw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v32i16:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddw (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpaddw 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX2-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: test_arg_v32i16:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vpaddw (%rdi), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddw 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX512F-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX512F-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddw %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: test_arg_v32i16:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpaddw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512VL-LABEL: test_arg_v32i16:
 ; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpaddw (%rdi), %ymm0, %ymm0
-; AVX512VL-NEXT: vpaddw 32(%rdi), %ymm1, %ymm1
+; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX512VL-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: retq
   %1 = load <32 x i16>, <32 x i16>* %src, align 64, !nontemporal !1
   %2 = add <32 x i16> %arg, %1
@@ -1081,41 +1211,48 @@
 ;
 ; AVX1-LABEL: test_arg_v64i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm2
-; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpaddb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
+; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpaddb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_arg_v64i8:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpaddb (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpaddb 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: test_arg_v64i8:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vpaddb (%rdi), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX512F-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: test_arg_v64i8:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpaddb (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512VL-LABEL: test_arg_v64i8:
 ; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpaddb (%rdi), %ymm0, %ymm0
-; AVX512VL-NEXT: vpaddb 32(%rdi), %ymm1, %ymm1
+; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm2
+; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm3
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: retq
   %1 = load <64 x i8>, <64 x i8>* %src, align 64, !nontemporal !1
   %2 = add <64 x i8> %arg, %1