diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -4081,6 +4081,8 @@ def mno_tsxldtrk : Flag<["-"], "mno-tsxldtrk">, Group<m_x86_Features_Group>; def muintr : Flag<["-"], "muintr">, Group<m_x86_Features_Group>; def mno_uintr : Flag<["-"], "mno-uintr">, Group<m_x86_Features_Group>; +def munalignedvecmove : Flag<["-"], "muse-unaligned-vector-move">, Group<m_x86_Features_Group>; +def mno_unalignedvecmove : Flag<["-"], "mno-use-unaligned-vector-move">, Group<m_x86_Features_Group>; def mvaes : Flag<["-"], "mvaes">, Group<m_x86_Features_Group>; def mno_vaes : Flag<["-"], "mno-vaes">, Group<m_x86_Features_Group>; def mvpclmulqdq : Flag<["-"], "mvpclmulqdq">, Group<m_x86_Features_Group>; diff --git a/clang/test/Driver/x86-target-features.c b/clang/test/Driver/x86-target-features.c --- a/clang/test/Driver/x86-target-features.c +++ b/clang/test/Driver/x86-target-features.c @@ -293,3 +293,8 @@ // RUN: %clang -target i386-unknown-linux-gnu -march=i386 -mno-avxvnni %s -### -o %t.o 2>&1 | FileCheck --check-prefix=NO-AVX-VNNI %s // AVX-VNNI: "-target-feature" "+avxvnni" // NO-AVX-VNNI: "-target-feature" "-avxvnni" + +// RUN: %clang -target i386-linux-gnu -muse-unaligned-vector-move %s -### -o %t.o 2>&1 | FileCheck -check-prefix=UNALIGNEDVECMOVE %s +// RUN: %clang -target i386-linux-gnu -mno-use-unaligned-vector-move %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-UNALIGNEDVECMOVE %s +// UNALIGNEDVECMOVE: "-target-feature" "+use-unaligned-vector-move" +// NO-UNALIGNEDVECMOVE: "-target-feature" "-use-unaligned-vector-move" diff --git a/llvm/include/llvm/Support/X86TargetParser.def b/llvm/include/llvm/Support/X86TargetParser.def --- a/llvm/include/llvm/Support/X86TargetParser.def +++ b/llvm/include/llvm/Support/X86TargetParser.def @@ -200,5 +200,6 @@ X86_FEATURE (RETPOLINE_INDIRECT_CALLS, "retpoline-indirect-calls") X86_FEATURE (LVI_CFI, "lvi-cfi") X86_FEATURE (LVI_LOAD_HARDENING, "lvi-load-hardening") +X86_FEATURE (UNALIGNED_VECTOR_MOVE, "use-unaligned-vector-move") #undef X86_FEATURE_COMPAT #undef X86_FEATURE diff --git 
a/llvm/lib/Support/X86TargetParser.cpp b/llvm/lib/Support/X86TargetParser.cpp --- a/llvm/lib/Support/X86TargetParser.cpp +++ b/llvm/lib/Support/X86TargetParser.cpp @@ -510,6 +510,7 @@ constexpr FeatureBitset ImpliedFeaturesRETPOLINE_INDIRECT_CALLS = {}; constexpr FeatureBitset ImpliedFeaturesLVI_CFI = {}; constexpr FeatureBitset ImpliedFeaturesLVI_LOAD_HARDENING = {}; +constexpr FeatureBitset ImpliedFeaturesUNALIGNED_VECTOR_MOVE = {}; // XSAVE features are dependent on basic XSAVE. constexpr FeatureBitset ImpliedFeaturesXSAVEC = FeatureXSAVE; diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td --- a/llvm/lib/Target/X86/X86.td +++ b/llvm/lib/Target/X86/X86.td @@ -522,6 +522,12 @@ def FeatureUseAA : SubtargetFeature<"use-aa", "UseAA", "true", "Use alias analysis during codegen">; +/// Always emit unaligned move instructions on AVX machine. +def FeatureUnalignedVecMove : SubtargetFeature<"use-unaligned-vector-move", + "UseUnalignedVectorMove", "true", + "Always emit unaligned vector move instructions " + "on AVX machine.">; + // Bonnell def ProcIntelAtom : SubtargetFeature<"", "X86ProcFamily", "IntelAtom", "">; // Silvermont diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -824,15 +824,21 @@ def extloadv4f32 : PatFrag<(ops node:$ptr), (extloadvf32 node:$ptr)>; def extloadv8f32 : PatFrag<(ops node:$ptr), (extloadvf32 node:$ptr)>; -// Like 'store', but always requires vector size alignment. +// Like 'store', but always requires vector size alignment when target doesn't +// have use-unaligned-vector-move feature. 
def alignedstore : PatFrag<(ops node:$val, node:$ptr), (store node:$val, node:$ptr), [{ + if (Subtarget->useUnalignedVecMove()) + return false; auto *St = cast<StoreSDNode>(N); return St->getAlignment() >= St->getMemoryVT().getStoreSize(); }]>; -// Like 'load', but always requires vector size alignment. +// Like 'load', but always requires vector size alignment when target doesn't +// have use-unaligned-vector-move feature. def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{ + if (Subtarget->useUnalignedVecMove()) + return false; auto *Ld = cast<LoadSDNode>(N); return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize(); }]>; diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h --- a/llvm/lib/Target/X86/X86Subtarget.h +++ b/llvm/lib/Target/X86/X86Subtarget.h @@ -468,6 +468,9 @@ /// Use alias analysis during code generation. bool UseAA = false; + /// Always emit unaligned vector move instructions on AVX machine. + bool UseUnalignedVectorMove = false; + /// The minimum alignment known to hold of the stack frame on /// entry to the function and which must be maintained by every function. Align stackAlignment = Align(4); @@ -903,6 +906,14 @@ } } + /// Unaligned vector move achieve the same performance as aligned vector move + /// does when the address is aligned on AVX machine. We will always emit + /// unaligned vector move on AVX machine when the UseUnalignedVectorMove is + /// set. + bool useUnalignedVecMove() const { + return hasAVX() && UseUnalignedVectorMove; + } + /// Classify a global variable reference for the current subtarget according /// to how we should reference it in a non-pcrel context. 
unsigned char classifyLocalReference(const GlobalValue *GV) const; diff --git a/llvm/test/CodeGen/X86/avx-unaligned-load-store.ll b/llvm/test/CodeGen/X86/avx-unaligned-load-store.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx-unaligned-load-store.ll @@ -0,0 +1,441 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2,+use-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,+use-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_AVX +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=sse4.2,+use-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_SSE32 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx,+use-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_AVX32 + +define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind { +; CHECK_SSE-LABEL: test_256_load: +; CHECK_SSE: # %bb.0: # %entry +; CHECK_SSE-NEXT: pushq %r15 +; CHECK_SSE-NEXT: pushq %r14 +; CHECK_SSE-NEXT: pushq %rbx +; CHECK_SSE-NEXT: subq $96, %rsp +; CHECK_SSE-NEXT: movq %rdx, %r14 +; CHECK_SSE-NEXT: movq %rsi, %r15 +; CHECK_SSE-NEXT: movq %rdi, %rbx +; CHECK_SSE-NEXT: movaps (%rdx), %xmm4 +; CHECK_SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK_SSE-NEXT: movaps 16(%rdx), %xmm5 +; CHECK_SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK_SSE-NEXT: movaps (%rsi), %xmm2 +; CHECK_SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK_SSE-NEXT: movaps 16(%rsi), %xmm3 +; CHECK_SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK_SSE-NEXT: movaps (%rdi), %xmm0 +; CHECK_SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK_SSE-NEXT: movaps 16(%rdi), %xmm1 +; CHECK_SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; CHECK_SSE-NEXT: callq dummy@PLT +; 
CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE-NEXT: movaps %xmm0, (%rbx) +; CHECK_SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; CHECK_SSE-NEXT: movaps %xmm0, 16(%rbx) +; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE-NEXT: movaps %xmm0, (%r15) +; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE-NEXT: movaps %xmm0, 16(%r15) +; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE-NEXT: movaps %xmm0, (%r14) +; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE-NEXT: movaps %xmm0, 16(%r14) +; CHECK_SSE-NEXT: addq $96, %rsp +; CHECK_SSE-NEXT: popq %rbx +; CHECK_SSE-NEXT: popq %r14 +; CHECK_SSE-NEXT: popq %r15 +; CHECK_SSE-NEXT: retq +; +; CHECK_AVX-LABEL: test_256_load: +; CHECK_AVX: # %bb.0: # %entry +; CHECK_AVX-NEXT: pushq %r15 +; CHECK_AVX-NEXT: pushq %r14 +; CHECK_AVX-NEXT: pushq %rbx +; CHECK_AVX-NEXT: subq $96, %rsp +; CHECK_AVX-NEXT: movq %rdx, %r14 +; CHECK_AVX-NEXT: movq %rsi, %r15 +; CHECK_AVX-NEXT: movq %rdi, %rbx +; CHECK_AVX-NEXT: vmovups (%rdi), %ymm0 +; CHECK_AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; CHECK_AVX-NEXT: vmovups (%rsi), %ymm1 +; CHECK_AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; CHECK_AVX-NEXT: vmovups (%rdx), %ymm2 +; CHECK_AVX-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill +; CHECK_AVX-NEXT: callq dummy@PLT +; CHECK_AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK_AVX-NEXT: vmovups %ymm0, (%rbx) +; CHECK_AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK_AVX-NEXT: vmovups %ymm0, (%r15) +; CHECK_AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; CHECK_AVX-NEXT: vmovups %ymm0, (%r14) +; CHECK_AVX-NEXT: addq $96, %rsp +; CHECK_AVX-NEXT: popq %rbx +; CHECK_AVX-NEXT: popq %r14 +; CHECK_AVX-NEXT: popq %r15 +; CHECK_AVX-NEXT: vzeroupper +; CHECK_AVX-NEXT: retq +; +; 
CHECK_SSE32-LABEL: test_256_load: +; CHECK_SSE32: # %bb.0: # %entry +; CHECK_SSE32-NEXT: pushl %ebp +; CHECK_SSE32-NEXT: movl %esp, %ebp +; CHECK_SSE32-NEXT: pushl %ebx +; CHECK_SSE32-NEXT: pushl %edi +; CHECK_SSE32-NEXT: pushl %esi +; CHECK_SSE32-NEXT: andl $-16, %esp +; CHECK_SSE32-NEXT: subl $160, %esp +; CHECK_SSE32-NEXT: movl 16(%ebp), %esi +; CHECK_SSE32-NEXT: movl 12(%ebp), %edi +; CHECK_SSE32-NEXT: movl 8(%ebp), %ebx +; CHECK_SSE32-NEXT: movaps (%ebx), %xmm0 +; CHECK_SSE32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK_SSE32-NEXT: movaps 16(%ebx), %xmm1 +; CHECK_SSE32-NEXT: movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK_SSE32-NEXT: movaps (%edi), %xmm2 +; CHECK_SSE32-NEXT: movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK_SSE32-NEXT: movaps 16(%edi), %xmm3 +; CHECK_SSE32-NEXT: movaps %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK_SSE32-NEXT: movaps (%esi), %xmm4 +; CHECK_SSE32-NEXT: movaps %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK_SSE32-NEXT: movaps 16(%esi), %xmm5 +; CHECK_SSE32-NEXT: movaps %xmm5, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK_SSE32-NEXT: movaps %xmm5, {{[0-9]+}}(%esp) +; CHECK_SSE32-NEXT: movaps %xmm4, {{[0-9]+}}(%esp) +; CHECK_SSE32-NEXT: movaps %xmm3, (%esp) +; CHECK_SSE32-NEXT: calll dummy@PLT +; CHECK_SSE32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE32-NEXT: movaps %xmm0, (%ebx) +; CHECK_SSE32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE32-NEXT: movaps %xmm0, 16(%ebx) +; CHECK_SSE32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE32-NEXT: movaps %xmm0, (%edi) +; CHECK_SSE32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE32-NEXT: movaps %xmm0, 16(%edi) +; CHECK_SSE32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK_SSE32-NEXT: movaps %xmm0, (%esi) +; CHECK_SSE32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte 
Reload +; CHECK_SSE32-NEXT: movaps %xmm0, 16(%esi) +; CHECK_SSE32-NEXT: leal -12(%ebp), %esp +; CHECK_SSE32-NEXT: popl %esi +; CHECK_SSE32-NEXT: popl %edi +; CHECK_SSE32-NEXT: popl %ebx +; CHECK_SSE32-NEXT: popl %ebp +; CHECK_SSE32-NEXT: retl +; +; CHECK_AVX32-LABEL: test_256_load: +; CHECK_AVX32: # %bb.0: # %entry +; CHECK_AVX32-NEXT: pushl %ebx +; CHECK_AVX32-NEXT: pushl %edi +; CHECK_AVX32-NEXT: pushl %esi +; CHECK_AVX32-NEXT: subl $112, %esp +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %esi +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %edi +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; CHECK_AVX32-NEXT: vmovups (%ebx), %ymm0 +; CHECK_AVX32-NEXT: vmovups %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill +; CHECK_AVX32-NEXT: vmovups (%edi), %ymm1 +; CHECK_AVX32-NEXT: vmovups %ymm1, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill +; CHECK_AVX32-NEXT: vmovups (%esi), %ymm2 +; CHECK_AVX32-NEXT: vmovups %ymm2, (%esp) # 32-byte Spill +; CHECK_AVX32-NEXT: calll dummy@PLT +; CHECK_AVX32-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK_AVX32-NEXT: vmovups %ymm0, (%ebx) +; CHECK_AVX32-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK_AVX32-NEXT: vmovups %ymm0, (%edi) +; CHECK_AVX32-NEXT: vmovups (%esp), %ymm0 # 32-byte Reload +; CHECK_AVX32-NEXT: vmovups %ymm0, (%esi) +; CHECK_AVX32-NEXT: addl $112, %esp +; CHECK_AVX32-NEXT: popl %esi +; CHECK_AVX32-NEXT: popl %edi +; CHECK_AVX32-NEXT: popl %ebx +; CHECK_AVX32-NEXT: vzeroupper +; CHECK_AVX32-NEXT: retl +entry: + %0 = bitcast double* %d to <4 x double>* + %tmp1.i = load <4 x double>, <4 x double>* %0, align 32 + %1 = bitcast float* %f to <8 x float>* + %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32 + %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32 + tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind + store <4 x double> %tmp1.i, <4 x double>* %0, align 32 + store <8 x float> %tmp1.i17, <8 x float>* %1, align 32 + store <4 x 
i64> %tmp1.i16, <4 x i64>* %i, align 32 + ret void +} + +declare void @dummy(<4 x double>, <8 x float>, <4 x i64>) + +define void @storev16i16(<16 x i16> %a) nounwind { +; CHECK_SSE-LABEL: storev16i16: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movaps %xmm1, (%rax) +; CHECK_SSE-NEXT: movaps %xmm0, (%rax) +; +; CHECK_AVX-LABEL: storev16i16: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups %ymm0, (%rax) +; +; CHECK_SSE32-LABEL: storev16i16: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movaps %xmm1, (%eax) +; CHECK_SSE32-NEXT: movaps %xmm0, (%eax) +; +; CHECK_AVX32-LABEL: storev16i16: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) + store <16 x i16> %a, <16 x i16>* undef, align 32 + unreachable +} + +define void @storev16i16_01(<16 x i16> %a) nounwind { +; CHECK_SSE-LABEL: storev16i16_01: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movups %xmm1, (%rax) +; CHECK_SSE-NEXT: movups %xmm0, (%rax) +; +; CHECK_AVX-LABEL: storev16i16_01: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups %ymm0, (%rax) +; +; CHECK_SSE32-LABEL: storev16i16_01: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movups %xmm1, (%eax) +; CHECK_SSE32-NEXT: movups %xmm0, (%eax) +; +; CHECK_AVX32-LABEL: storev16i16_01: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) + store <16 x i16> %a, <16 x i16>* undef, align 4 + unreachable +} + +define void @storev32i8(<32 x i8> %a) nounwind { +; CHECK_SSE-LABEL: storev32i8: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movaps %xmm1, (%rax) +; CHECK_SSE-NEXT: movaps %xmm0, (%rax) +; +; CHECK_AVX-LABEL: storev32i8: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups %ymm0, (%rax) +; +; CHECK_SSE32-LABEL: storev32i8: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movaps %xmm1, (%eax) +; CHECK_SSE32-NEXT: movaps %xmm0, (%eax) +; +; CHECK_AVX32-LABEL: storev32i8: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) + store <32 x i8> %a, <32 x i8>* undef, align 32 + unreachable +} + +define void @storev32i8_01(<32 x i8> %a) 
nounwind { +; CHECK_SSE-LABEL: storev32i8_01: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movups %xmm1, (%rax) +; CHECK_SSE-NEXT: movups %xmm0, (%rax) +; +; CHECK_AVX-LABEL: storev32i8_01: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups %ymm0, (%rax) +; +; CHECK_SSE32-LABEL: storev32i8_01: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movups %xmm1, (%eax) +; CHECK_SSE32-NEXT: movups %xmm0, (%eax) +; +; CHECK_AVX32-LABEL: storev32i8_01: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) + store <32 x i8> %a, <32 x i8>* undef, align 4 + unreachable +} + +; It is faster to make two saves, if the data is already in xmm registers. For +; example, after making an integer operation. +define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp { +; CHECK_SSE-LABEL: double_save: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi) +; CHECK_SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK_SSE-NEXT: retq +; +; CHECK_AVX-LABEL: double_save: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups %xmm1, 16(%rdi) +; CHECK_AVX-NEXT: vmovups %xmm0, (%rdi) +; CHECK_AVX-NEXT: retq +; +; CHECK_SSE32-LABEL: double_save: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_SSE32-NEXT: movaps %xmm1, 16(%eax) +; CHECK_SSE32-NEXT: movaps %xmm0, (%eax) +; CHECK_SSE32-NEXT: retl +; +; CHECK_AVX32-LABEL: double_save: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_AVX32-NEXT: vmovups %xmm1, 16(%eax) +; CHECK_AVX32-NEXT: vmovups %xmm0, (%eax) +; CHECK_AVX32-NEXT: retl + %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %Z, <8 x i32>* %P, align 16 + ret void +} + +define void @double_save_volatile(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind { +; CHECK_SSE-LABEL: double_save_volatile: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi) +; CHECK_SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK_SSE-NEXT: retq +; +; CHECK_AVX-LABEL: double_save_volatile: +; 
CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; CHECK_AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; CHECK_AVX-NEXT: vmovups %ymm0, (%rdi) +; CHECK_AVX-NEXT: vzeroupper +; CHECK_AVX-NEXT: retq +; +; CHECK_SSE32-LABEL: double_save_volatile: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_SSE32-NEXT: movaps %xmm1, 16(%eax) +; CHECK_SSE32-NEXT: movaps %xmm0, (%eax) +; CHECK_SSE32-NEXT: retl +; +; CHECK_AVX32-LABEL: double_save_volatile: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_AVX32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) +; CHECK_AVX32-NEXT: vzeroupper +; CHECK_AVX32-NEXT: retl + %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store volatile <8 x i32> %Z, <8 x i32>* %P, align 16 + ret void +} + +define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind { +; CHECK_SSE-LABEL: add8i32: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movups (%rsi), %xmm0 +; CHECK_SSE-NEXT: movups 16(%rsi), %xmm1 +; CHECK_SSE-NEXT: movups %xmm1, 16(%rdi) +; CHECK_SSE-NEXT: movups %xmm0, (%rdi) +; CHECK_SSE-NEXT: retq +; +; CHECK_AVX-LABEL: add8i32: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups (%rsi), %ymm0 +; CHECK_AVX-NEXT: vmovups %ymm0, (%rdi) +; CHECK_AVX-NEXT: vzeroupper +; CHECK_AVX-NEXT: retq +; +; CHECK_SSE32-LABEL: add8i32: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK_SSE32-NEXT: movups (%ecx), %xmm0 +; CHECK_SSE32-NEXT: movups 16(%ecx), %xmm1 +; CHECK_SSE32-NEXT: movups %xmm1, 16(%eax) +; CHECK_SSE32-NEXT: movups %xmm0, (%eax) +; CHECK_SSE32-NEXT: retl +; +; CHECK_AVX32-LABEL: add8i32: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK_AVX32-NEXT: vmovups (%ecx), %ymm0 +; 
CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) +; CHECK_AVX32-NEXT: vzeroupper +; CHECK_AVX32-NEXT: retl + %b = load <8 x i32>, <8 x i32>* %bp, align 1 + %x = add <8 x i32> zeroinitializer, %b + store <8 x i32> %x, <8 x i32>* %ret, align 1 + ret void +} + +define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind { +; CHECK_SSE-LABEL: add4i64a64: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movaps (%rsi), %xmm0 +; CHECK_SSE-NEXT: movaps 16(%rsi), %xmm1 +; CHECK_SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi) +; CHECK_SSE-NEXT: retq +; +; CHECK_AVX-LABEL: add4i64a64: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups (%rsi), %ymm0 +; CHECK_AVX-NEXT: vmovups %ymm0, (%rdi) +; CHECK_AVX-NEXT: vzeroupper +; CHECK_AVX-NEXT: retq +; +; CHECK_SSE32-LABEL: add4i64a64: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK_SSE32-NEXT: movaps (%ecx), %xmm0 +; CHECK_SSE32-NEXT: movaps 16(%ecx), %xmm1 +; CHECK_SSE32-NEXT: movaps %xmm0, (%eax) +; CHECK_SSE32-NEXT: movaps %xmm1, 16(%eax) +; CHECK_SSE32-NEXT: retl +; +; CHECK_AVX32-LABEL: add4i64a64: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK_AVX32-NEXT: vmovups (%ecx), %ymm0 +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) +; CHECK_AVX32-NEXT: vzeroupper +; CHECK_AVX32-NEXT: retl + %b = load <4 x i64>, <4 x i64>* %bp, align 64 + %x = add <4 x i64> zeroinitializer, %b + store <4 x i64> %x, <4 x i64>* %ret, align 64 + ret void +} + +define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind { +; CHECK_SSE-LABEL: add4i64a16: +; CHECK_SSE: # %bb.0: +; CHECK_SSE-NEXT: movaps (%rsi), %xmm0 +; CHECK_SSE-NEXT: movaps 16(%rsi), %xmm1 +; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi) +; CHECK_SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK_SSE-NEXT: retq +; +; CHECK_AVX-LABEL: add4i64a16: +; CHECK_AVX: # %bb.0: +; CHECK_AVX-NEXT: vmovups (%rsi), %ymm0 +; CHECK_AVX-NEXT: 
vmovups %ymm0, (%rdi) +; CHECK_AVX-NEXT: vzeroupper +; CHECK_AVX-NEXT: retq +; +; CHECK_SSE32-LABEL: add4i64a16: +; CHECK_SSE32: # %bb.0: +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_SSE32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK_SSE32-NEXT: movaps (%ecx), %xmm0 +; CHECK_SSE32-NEXT: movaps 16(%ecx), %xmm1 +; CHECK_SSE32-NEXT: movaps %xmm1, 16(%eax) +; CHECK_SSE32-NEXT: movaps %xmm0, (%eax) +; CHECK_SSE32-NEXT: retl +; +; CHECK_AVX32-LABEL: add4i64a16: +; CHECK_AVX32: # %bb.0: +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK_AVX32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK_AVX32-NEXT: vmovups (%ecx), %ymm0 +; CHECK_AVX32-NEXT: vmovups %ymm0, (%eax) +; CHECK_AVX32-NEXT: vzeroupper +; CHECK_AVX32-NEXT: retl + %b = load <4 x i64>, <4 x i64>* %bp, align 16 + %x = add <4 x i64> zeroinitializer, %b + store <4 x i64> %x, <4 x i64>* %ret, align 16 + ret void +} diff --git a/llvm/test/CodeGen/X86/avx512-unaligned-load-store.ll b/llvm/test/CodeGen/X86/avx512-unaligned-load-store.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-unaligned-load-store.ll @@ -0,0 +1,595 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,+use-unaligned-vector-move | FileCheck %s -check-prefix=X64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512f,+use-unaligned-vector-move | FileCheck %s -check-prefix=X86 + +define <16 x i32> @test17(i8 * %addr) { +; X64-LABEL: test17: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test17: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <16 x i32>* + %res = load <16 x i32>, <16 x i32>* %vaddr, align 64 + ret <16 x i32>%res +} + +define void @test18(i8 * %addr, <8 x i64> %data) { +; X64-LABEL: test18: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; 
X64-NEXT: retq +; +; X86-LABEL: test18: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i64>* + store <8 x i64>%data, <8 x i64>* %vaddr, align 64 + ret void +} + +define void @test19(i8 * %addr, <16 x i32> %data) { +; X64-LABEL: test19: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test19: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <16 x i32>* + store <16 x i32>%data, <16 x i32>* %vaddr, align 1 + ret void +} + +define void @test20(i8 * %addr, <16 x i32> %data) { +; X64-LABEL: test20: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test20: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <16 x i32>* + store <16 x i32>%data, <16 x i32>* %vaddr, align 64 + ret void +} + +define <8 x i64> @test21(i8 * %addr) { +; X64-LABEL: test21: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test21: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i64>* + %res = load <8 x i64>, <8 x i64>* %vaddr, align 64 + ret <8 x i64>%res +} + +define void @test22(i8 * %addr, <8 x i64> %data) { +; X64-LABEL: test22: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test22: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i64>* + store <8 x i64>%data, <8 x i64>* %vaddr, align 1 + ret void +} + 
+define <8 x i64> @test23(i8 * %addr) { +; X64-LABEL: test23: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test23: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i64>* + %res = load <8 x i64>, <8 x i64>* %vaddr, align 1 + ret <8 x i64>%res +} + +define void @test24(i8 * %addr, <8 x double> %data) { +; X64-LABEL: test24: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test24: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x double>* + store <8 x double>%data, <8 x double>* %vaddr, align 64 + ret void +} + +define <8 x double> @test25(i8 * %addr) { +; X64-LABEL: test25: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test25: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x double>* + %res = load <8 x double>, <8 x double>* %vaddr, align 64 + ret <8 x double>%res +} + +define void @test26(i8 * %addr, <16 x float> %data) { +; X64-LABEL: test26: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test26: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <16 x float>* + store <16 x float>%data, <16 x float>* %vaddr, align 64 + ret void +} + +define <16 x float> @test27(i8 * %addr) { +; X64-LABEL: test27: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test27: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to 
<16 x float>* + %res = load <16 x float>, <16 x float>* %vaddr, align 64 + ret <16 x float>%res +} + +define void @test28(i8 * %addr, <8 x double> %data) { +; X64-LABEL: test28: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test28: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x double>* + store <8 x double>%data, <8 x double>* %vaddr, align 1 + ret void +} + +define <8 x double> @test29(i8 * %addr) { +; X64-LABEL: test29: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test29: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x double>* + %res = load <8 x double>, <8 x double>* %vaddr, align 1 + ret <8 x double>%res +} + +define void @test30(i8 * %addr, <16 x float> %data) { +; X64-LABEL: test30: +; X64: # %bb.0: +; X64-NEXT: vmovups %zmm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test30: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %zmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <16 x float>* + store <16 x float>%data, <16 x float>* %vaddr, align 1 + ret void +} + +define <16 x float> @test31(i8 * %addr) { +; X64-LABEL: test31: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: test31: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %zmm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <16 x float>* + %res = load <16 x float>, <16 x float>* %vaddr, align 1 + ret <16 x float>%res +} + +define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) { +; X64-LABEL: test32: +; X64: # %bb.0: +; X64-NEXT: vptestmd %zmm1, %zmm1, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %zmm0 
{%k1} +; X64-NEXT: retq +; +; X86-LABEL: test32: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %zmm1, %zmm1, %k1 +; X86-NEXT: vmovdqu32 (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x i32>* + %r = load <16 x i32>, <16 x i32>* %vaddr, align 64 + %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old + ret <16 x i32>%res +} + +define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) { +; X64-LABEL: test33: +; X64: # %bb.0: +; X64-NEXT: vptestmd %zmm1, %zmm1, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test33: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %zmm1, %zmm1, %k1 +; X86-NEXT: vmovdqu32 (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x i32>* + %r = load <16 x i32>, <16 x i32>* %vaddr, align 1 + %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old + ret <16 x i32>%res +} + +define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) { +; X64-LABEL: test34: +; X64: # %bb.0: +; X64-NEXT: vptestmd %zmm0, %zmm0, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test34: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %zmm0, %zmm0, %k1 +; X86-NEXT: vmovdqu32 (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x i32>* + %r = load <16 x i32>, <16 x i32>* %vaddr, align 64 + %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer + ret <16 x i32>%res +} + +define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) { +; X64-LABEL: test35: +; X64: # %bb.0: +; X64-NEXT: vptestmd %zmm0, %zmm0, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test35: +; X86: # %bb.0: +; X86-NEXT: movl 
{{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %zmm0, %zmm0, %k1 +; X86-NEXT: vmovdqu32 (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x i32>* + %r = load <16 x i32>, <16 x i32>* %vaddr, align 1 + %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer + ret <16 x i32>%res +} + +define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) { +; X64-LABEL: test36: +; X64: # %bb.0: +; X64-NEXT: vptestmq %zmm1, %zmm1, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test36: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %zmm1, %zmm1, %k1 +; X86-NEXT: vmovdqu64 (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <8 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i64>* + %r = load <8 x i64>, <8 x i64>* %vaddr, align 64 + %res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> %old + ret <8 x i64>%res +} + +define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) { +; X64-LABEL: test37: +; X64: # %bb.0: +; X64-NEXT: vptestmq %zmm1, %zmm1, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test37: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %zmm1, %zmm1, %k1 +; X86-NEXT: vmovdqu64 (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <8 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i64>* + %r = load <8 x i64>, <8 x i64>* %vaddr, align 1 + %res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> %old + ret <8 x i64>%res +} + +define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) { +; X64-LABEL: test38: +; X64: # %bb.0: +; X64-NEXT: vptestmq %zmm0, %zmm0, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test38: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %zmm0, %zmm0, %k1 +; X86-NEXT: vmovdqu64 (%eax), %zmm0 
{%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <8 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i64>* + %r = load <8 x i64>, <8 x i64>* %vaddr, align 64 + %res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> zeroinitializer + ret <8 x i64>%res +} + +define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) { +; X64-LABEL: test39: +; X64: # %bb.0: +; X64-NEXT: vptestmq %zmm0, %zmm0, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test39: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %zmm0, %zmm0, %k1 +; X86-NEXT: vmovdqu64 (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <8 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i64>* + %r = load <8 x i64>, <8 x i64>* %vaddr, align 1 + %res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> zeroinitializer + ret <8 x i64>%res +} + +define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) { +; X64-LABEL: test40: +; X64: # %bb.0: +; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; X64-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 +; X64-NEXT: vmovups (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test40: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; X86-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 +; X86-NEXT: vmovups (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = fcmp one <16 x float> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x float>* + %r = load <16 x float>, <16 x float>* %vaddr, align 64 + %res = select <16 x i1> %mask, <16 x float> %r, <16 x float> %old + ret <16 x float>%res +} + +define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) { +; X64-LABEL: test41: +; X64: # %bb.0: +; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; X64-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 +; X64-NEXT: vmovups (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test41: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; 
X86-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; X86-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 +; X86-NEXT: vmovups (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = fcmp one <16 x float> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x float>* + %r = load <16 x float>, <16 x float>* %vaddr, align 1 + %res = select <16 x i1> %mask, <16 x float> %r, <16 x float> %old + ret <16 x float>%res +} + +define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) { +; X64-LABEL: test42: +; X64: # %bb.0: +; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; X64-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 +; X64-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test42: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; X86-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 +; X86-NEXT: vmovups (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = fcmp one <16 x float> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x float>* + %r = load <16 x float>, <16 x float>* %vaddr, align 64 + %res = select <16 x i1> %mask, <16 x float> %r, <16 x float> zeroinitializer + ret <16 x float>%res +} + +define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) { +; X64-LABEL: test43: +; X64: # %bb.0: +; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; X64-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 +; X64-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test43: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; X86-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 +; X86-NEXT: vmovups (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = fcmp one <16 x float> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <16 x float>* + %r = load <16 x float>, <16 x float>* %vaddr, align 1 + %res = select <16 x i1> %mask, <16 x float> %r, <16 x float> zeroinitializer + ret <16 x float>%res +} + +define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) { +; X64-LABEL: test44: +; X64: # 
%bb.0: +; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; X64-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 +; X64-NEXT: vmovupd (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test44: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; X86-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 +; X86-NEXT: vmovupd (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = fcmp one <8 x double> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x double>* + %r = load <8 x double>, <8 x double>* %vaddr, align 64 + %res = select <8 x i1> %mask, <8 x double> %r, <8 x double> %old + ret <8 x double>%res +} + +define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) { +; X64-LABEL: test45: +; X64: # %bb.0: +; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; X64-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 +; X64-NEXT: vmovupd (%rdi), %zmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test45: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; X86-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 +; X86-NEXT: vmovupd (%eax), %zmm0 {%k1} +; X86-NEXT: retl + %mask = fcmp one <8 x double> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x double>* + %r = load <8 x double>, <8 x double>* %vaddr, align 1 + %res = select <8 x i1> %mask, <8 x double> %r, <8 x double> %old + ret <8 x double>%res +} + +define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) { +; X64-LABEL: test46: +; X64: # %bb.0: +; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; X64-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 +; X64-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test46: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; X86-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 +; X86-NEXT: vmovupd (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = fcmp one <8 x double> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x double>* + %r = load <8 x double>, <8 x double>* 
%vaddr, align 64 + %res = select <8 x i1> %mask, <8 x double> %r, <8 x double> zeroinitializer + ret <8 x double>%res +} + +define <8 x double> @test47(i8 * %addr, <8 x double> %mask1) { +; X64-LABEL: test47: +; X64: # %bb.0: +; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; X64-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 +; X64-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test47: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; X86-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 +; X86-NEXT: vmovupd (%eax), %zmm0 {%k1} {z} +; X86-NEXT: retl + %mask = fcmp one <8 x double> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x double>* + %r = load <8 x double>, <8 x double>* %vaddr, align 1 + %res = select <8 x i1> %mask, <8 x double> %r, <8 x double> zeroinitializer + ret <8 x double>%res +} diff --git a/llvm/test/CodeGen/X86/avx512vl-unaligned-load-store.ll b/llvm/test/CodeGen/X86/avx512vl-unaligned-load-store.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512vl-unaligned-load-store.ll @@ -0,0 +1,747 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+use-unaligned-vector-move | FileCheck %s -check-prefix=X64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512vl,+use-unaligned-vector-move | FileCheck %s -check-prefix=X86 + +define <8 x i32> @test_256_1(i8 * %addr) { +; CHECK-LABEL: test_256_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_1: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_1: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i32>* + %res = load <8 x i32>, <8 x i32>* %vaddr, align 1 + ret <8 x i32>%res +} + +define <8 x i32> @test_256_2(i8 * %addr) 
{ +; CHECK-LABEL: test_256_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_2: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_2: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i32>* + %res = load <8 x i32>, <8 x i32>* %vaddr, align 32 + ret <8 x i32>%res +} + +define void @test_256_3(i8 * %addr, <4 x i64> %data) { +; CHECK-LABEL: test_256_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_3: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_3: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x i64>* + store <4 x i64>%data, <4 x i64>* %vaddr, align 32 + ret void +} + +define void @test_256_4(i8 * %addr, <8 x i32> %data) { +; CHECK-LABEL: test_256_4: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_4: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_4: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i32>* + store <8 x i32>%data, <8 x i32>* %vaddr, align 1 + ret void +} + +define void @test_256_5(i8 * %addr, <8 x i32> %data) { +; CHECK-LABEL: test_256_5: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_5: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_5: +; X86: # %bb.0: +; X86-NEXT: movl 
{{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x i32>* + store <8 x i32>%data, <8 x i32>* %vaddr, align 32 + ret void +} + +define <4 x i64> @test_256_6(i8 * %addr) { +; CHECK-LABEL: test_256_6: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_6: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_6: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x i64>* + %res = load <4 x i64>, <4 x i64>* %vaddr, align 32 + ret <4 x i64>%res +} + +define void @test_256_7(i8 * %addr, <4 x i64> %data) { +; CHECK-LABEL: test_256_7: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_7: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_7: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x i64>* + store <4 x i64>%data, <4 x i64>* %vaddr, align 1 + ret void +} + +define <4 x i64> @test_256_8(i8 * %addr) { +; CHECK-LABEL: test_256_8: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_8: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_8: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x i64>* + %res = load <4 x i64>, <4 x i64>* %vaddr, align 1 + ret <4 x i64>%res +} + +define void @test_256_9(i8 * %addr, <4 x double> %data) { +; CHECK-LABEL: test_256_9: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; 
X64-LABEL: test_256_9: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_9: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x double>* + store <4 x double>%data, <4 x double>* %vaddr, align 32 + ret void +} + +define <4 x double> @test_256_10(i8 * %addr) { +; CHECK-LABEL: test_256_10: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_10: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_10: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x double>* + %res = load <4 x double>, <4 x double>* %vaddr, align 32 + ret <4 x double>%res +} + +define void @test_256_11(i8 * %addr, <8 x float> %data) { +; CHECK-LABEL: test_256_11: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_11: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_11: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x float>* + store <8 x float>%data, <8 x float>* %vaddr, align 32 + ret void +} + +define <8 x float> @test_256_12(i8 * %addr) { +; CHECK-LABEL: test_256_12: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_12: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_12: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x float>* + %res = load <8 x float>, <8 x float>* 
%vaddr, align 32 + ret <8 x float>%res +} + +define void @test_256_13(i8 * %addr, <4 x double> %data) { +; CHECK-LABEL: test_256_13: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_13: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_13: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x double>* + store <4 x double>%data, <4 x double>* %vaddr, align 1 + ret void +} + +define <4 x double> @test_256_14(i8 * %addr) { +; CHECK-LABEL: test_256_14: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_14: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: retq +; +; X86-LABEL: test_256_14: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <4 x double>* + %res = load <4 x double>, <4 x double>* %vaddr, align 1 + ret <4 x double>%res +} + +define void @test_256_15(i8 * %addr, <8 x float> %data) { +; CHECK-LABEL: test_256_15: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups %ymm0, (%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +; X64-LABEL: test_256_15: +; X64: # %bb.0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq +; +; X86-LABEL: test_256_15: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x float>* + store <8 x float>%data, <8 x float>* %vaddr, align 1 + ret void +} + +define <8 x float> @test_256_16(i8 * %addr) { +; CHECK-LABEL: test_256_16: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovups (%rdi), %ymm0 +; CHECK-NEXT: retq +; X64-LABEL: test_256_16: +; X64: # %bb.0: +; X64-NEXT: vmovups (%rdi), %ymm0 +; X64-NEXT: 
retq +; +; X86-LABEL: test_256_16: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups (%eax), %ymm0 +; X86-NEXT: retl + %vaddr = bitcast i8* %addr to <8 x float>* + %res = load <8 x float>, <8 x float>* %vaddr, align 1 + ret <8 x float>%res +} + +define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) { +; CHECK-LABEL: test_256_17: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %ymm1, %ymm1, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_256_17: +; X64: # %bb.0: +; X64-NEXT: vptestmd %ymm1, %ymm1, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_256_17: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %ymm1, %ymm1, %k1 +; X86-NEXT: vmovdqu32 (%eax), %ymm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <8 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i32>* + %r = load <8 x i32>, <8 x i32>* %vaddr, align 32 + %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old + ret <8 x i32>%res +} + +define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) { +; CHECK-LABEL: test_256_18: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %ymm1, %ymm1, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_256_18: +; X64: # %bb.0: +; X64-NEXT: vptestmd %ymm1, %ymm1, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_256_18: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %ymm1, %ymm1, %k1 +; X86-NEXT: vmovdqu32 (%eax), %ymm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <8 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i32>* + %r = load <8 x i32>, <8 x i32>* %vaddr, align 1 + %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old + ret <8 x i32>%res +} + +define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) { +; CHECK-LABEL: test_256_19: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vptestmd %ymm0, %ymm0, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_256_19: +; X64: # %bb.0: +; X64-NEXT: vptestmd %ymm0, %ymm0, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_256_19: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %ymm0, %ymm0, %k1 +; X86-NEXT: vmovdqu32 (%eax), %ymm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <8 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i32>* + %r = load <8 x i32>, <8 x i32>* %vaddr, align 32 + %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer + ret <8 x i32>%res +} + +define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) { +; CHECK-LABEL: test_256_20: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %ymm0, %ymm0, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_256_20: +; X64: # %bb.0: +; X64-NEXT: vptestmd %ymm0, %ymm0, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_256_20: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %ymm0, %ymm0, %k1 +; X86-NEXT: vmovdqu32 (%eax), %ymm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <8 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <8 x i32>* + %r = load <8 x i32>, <8 x i32>* %vaddr, align 1 + %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer + ret <8 x i32>%res +} + +define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) { +; CHECK-LABEL: test_256_21: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %ymm1, %ymm1, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_256_21: +; X64: # %bb.0: +; X64-NEXT: vptestmq %ymm1, %ymm1, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_256_21: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %ymm1, %ymm1, %k1 +; 
X86-NEXT: vmovdqu64 (%eax), %ymm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <4 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i64>* + %r = load <4 x i64>, <4 x i64>* %vaddr, align 32 + %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old + ret <4 x i64>%res +} + +define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) { +; CHECK-LABEL: test_256_22: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %ymm1, %ymm1, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_256_22: +; X64: # %bb.0: +; X64-NEXT: vptestmq %ymm1, %ymm1, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_256_22: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %ymm1, %ymm1, %k1 +; X86-NEXT: vmovdqu64 (%eax), %ymm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <4 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i64>* + %r = load <4 x i64>, <4 x i64>* %vaddr, align 1 + %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old + ret <4 x i64>%res +} + +define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) { +; CHECK-LABEL: test_256_23: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %ymm0, %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_256_23: +; X64: # %bb.0: +; X64-NEXT: vptestmq %ymm0, %ymm0, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_256_23: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %ymm0, %ymm0, %k1 +; X86-NEXT: vmovdqu64 (%eax), %ymm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <4 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i64>* + %r = load <4 x i64>, <4 x i64>* %vaddr, align 32 + %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer + ret <4 x i64>%res +} + +define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) { +; CHECK-LABEL: test_256_24: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vptestmq %ymm0, %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_256_24: +; X64: # %bb.0: +; X64-NEXT: vptestmq %ymm0, %ymm0, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_256_24: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %ymm0, %ymm0, %k1 +; X86-NEXT: vmovdqu64 (%eax), %ymm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <4 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i64>* + %r = load <4 x i64>, <4 x i64>* %vaddr, align 1 + %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer + ret <4 x i64>%res +} + +define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) { +; CHECK-LABEL: test_128_17: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_128_17: +; X64: # %bb.0: +; X64-NEXT: vptestmd %xmm1, %xmm1, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_128_17: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %xmm1, %xmm1, %k1 +; X86-NEXT: vmovdqu32 (%eax), %xmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <4 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i32>* + %r = load <4 x i32>, <4 x i32>* %vaddr, align 16 + %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old + ret <4 x i32>%res +} + +define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) { +; CHECK-LABEL: test_128_18: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_128_18: +; X64: # %bb.0: +; X64-NEXT: vptestmd %xmm1, %xmm1, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_128_18: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %xmm1, 
%xmm1, %k1 +; X86-NEXT: vmovdqu32 (%eax), %xmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <4 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i32>* + %r = load <4 x i32>, <4 x i32>* %vaddr, align 1 + %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old + ret <4 x i32>%res +} + +define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) { +; CHECK-LABEL: test_128_19: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_128_19: +; X64: # %bb.0: +; X64-NEXT: vptestmd %xmm0, %xmm0, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_128_19: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %xmm0, %xmm0, %k1 +; X86-NEXT: vmovdqu32 (%eax), %xmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <4 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i32>* + %r = load <4 x i32>, <4 x i32>* %vaddr, align 16 + %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer + ret <4 x i32>%res +} + +define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) { +; CHECK-LABEL: test_128_20: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1 +; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_128_20: +; X64: # %bb.0: +; X64-NEXT: vptestmd %xmm0, %xmm0, %k1 +; X64-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_128_20: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmd %xmm0, %xmm0, %k1 +; X86-NEXT: vmovdqu32 (%eax), %xmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <4 x i32> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <4 x i32>* + %r = load <4 x i32>, <4 x i32>* %vaddr, align 1 + %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer + ret <4 x i32>%res +} + +define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) { +; 
CHECK-LABEL: test_128_21: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_128_21: +; X64: # %bb.0: +; X64-NEXT: vptestmq %xmm1, %xmm1, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_128_21: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %xmm1, %xmm1, %k1 +; X86-NEXT: vmovdqu64 (%eax), %xmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <2 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <2 x i64>* + %r = load <2 x i64>, <2 x i64>* %vaddr, align 16 + %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old + ret <2 x i64>%res +} + +define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) { +; CHECK-LABEL: test_128_22: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq +; X64-LABEL: test_128_22: +; X64: # %bb.0: +; X64-NEXT: vptestmq %xmm1, %xmm1, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} +; X64-NEXT: retq +; +; X86-LABEL: test_128_22: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %xmm1, %xmm1, %k1 +; X86-NEXT: vmovdqu64 (%eax), %xmm0 {%k1} +; X86-NEXT: retl + %mask = icmp ne <2 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <2 x i64>* + %r = load <2 x i64>, <2 x i64>* %vaddr, align 1 + %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old + ret <2 x i64>%res +} + +define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) { +; CHECK-LABEL: test_128_23: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_128_23: +; X64: # %bb.0: +; X64-NEXT: vptestmq %xmm0, %xmm0, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_128_23: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq 
%xmm0, %xmm0, %k1 +; X86-NEXT: vmovdqu64 (%eax), %xmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <2 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <2 x i64>* + %r = load <2 x i64>, <2 x i64>* %vaddr, align 16 + %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer + ret <2 x i64>%res +} + +define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) { +; CHECK-LABEL: test_128_24: +; CHECK: # %bb.0: +; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1 +; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq +; X64-LABEL: test_128_24: +; X64: # %bb.0: +; X64-NEXT: vptestmq %xmm0, %xmm0, %k1 +; X64-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} {z} +; X64-NEXT: retq +; +; X86-LABEL: test_128_24: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vptestmq %xmm0, %xmm0, %k1 +; X86-NEXT: vmovdqu64 (%eax), %xmm0 {%k1} {z} +; X86-NEXT: retl + %mask = icmp ne <2 x i64> %mask1, zeroinitializer + %vaddr = bitcast i8* %addr to <2 x i64>* + %r = load <2 x i64>, <2 x i64>* %vaddr, align 1 + %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer + ret <2 x i64>%res +}