diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -70,6 +70,7 @@
   X86TargetMachine.cpp
   X86TargetObjectFile.cpp
   X86TargetTransformInfo.cpp
+  X86UnalignedVectorMoves.cpp
   X86VZeroUpper.cpp
   X86WinAllocaExpander.cpp
   X86WinEHState.cpp
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -141,6 +141,7 @@
 FunctionPass *createX86LoadValueInjectionRetHardeningPass();
 FunctionPass *createX86SpeculativeLoadHardeningPass();
 FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();
+FunctionPass *createX86UnalignedVectorMoves();

 void initializeEvexToVexInstPassPass(PassRegistry &);
 void initializeFixupBWInstPassPass(PassRegistry &);
@@ -162,6 +163,7 @@
 void initializeX86PartialReductionPass(PassRegistry &);
 void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
 void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
+void initializeX86UnalignedVectorMovePassPass(PassRegistry &);

 namespace X86AS {
 enum : unsigned {
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -83,6 +83,7 @@
   initializeX86LoadValueInjectionRetHardeningPassPass(PR);
   initializeX86OptimizeLEAPassPass(PR);
   initializeX86PartialReductionPass(PR);
+  initializeX86UnalignedVectorMovePassPass(PR);
 }

 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -524,6 +525,7 @@
     addPass(createX86PadShortFunctions());
     addPass(createX86FixupLEAs());
   }
+  addPass(createX86UnalignedVectorMoves());
   addPass(createX86EvexToVexInsts());
   addPass(createX86DiscriminateMemOpsPass());
   addPass(createX86InsertPrefetchPass());
diff --git a/llvm/lib/Target/X86/X86UnalignedVectorMoves.cpp b/llvm/lib/Target/X86/X86UnalignedVectorMoves.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/X86/X86UnalignedVectorMoves.cpp
@@ -0,0 +1,188 @@
+//===- X86UnalignedVectorMoves.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file defines a pass that replaces movaps with movups. movups achieves
+/// the same performance as movaps when the address is aligned. If the address
+/// is not aligned, movups executes without raising an exception, whereas
+/// movaps raises one. Users sometimes want to suppress that exception, so an
+/// option is provided for this purpose.
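+///
+/// For example, with the option enabled an aligned 256-bit move such as
+///   vmovaps (%rdi), %ymm0
+/// is rewritten as
+///   vmovups (%rdi), %ymm0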
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+#define UNALIGNED_VEC_MOV_DESC "X86 unaligned vector move"
+#define DEBUG_TYPE "x86-unaligned-vector-move"
+
+static cl::opt<bool> EnableX86UnalignedVecMov(
+    "enable-x86-unaligned-vector-move", cl::Hidden,
+    cl::desc("X86: Enable transforming aligned vector move instructions to "
+             "unaligned vector moves."),
+    cl::init(false));
+
+namespace {
+
+class X86UnalignedVectorMovePass : public MachineFunctionPass {
+
+  bool MovapsToMovupsImpl(MachineInstr &MI) const;
+
+public:
+  static char ID;
+
+  X86UnalignedVectorMovePass() : MachineFunctionPass(ID) {}
+
+  StringRef getPassName() const override { return UNALIGNED_VEC_MOV_DESC; }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  // This pass runs after regalloc and doesn't support VReg operands.
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+private:
+  /// Machine instruction info used throughout the class.
+  const X86InstrInfo *TII = nullptr;
+};
+
+} // end anonymous namespace
+
+char X86UnalignedVectorMovePass::ID = 0;
+
+bool X86UnalignedVectorMovePass::runOnMachineFunction(MachineFunction &MF) {
+  if (!EnableX86UnalignedVecMov)
+    return false;
+
+  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
+  TII = ST.getInstrInfo();
+  if (!ST.hasAVX())
+    return false;
+
+  bool Changed = false;
+
+  // Go over all basic blocks in the function and replace
+  // movaps with movups when possible.
+  for (MachineBasicBlock &MBB : MF) {
+
+    // Traverse the basic block.
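+    // MovapsToMovupsImpl only rewrites the opcode in place and never adds or
+    // removes instructions, so mutating while iterating is safe here.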
+    for (MachineInstr &MI : MBB)
+      Changed |= MovapsToMovupsImpl(MI);
+  }
+
+  return Changed;
+}
+
+bool X86UnalignedVectorMovePass::MovapsToMovupsImpl(MachineInstr &MI) const {
+  unsigned Opc = MI.getOpcode();
+  unsigned NewOpc;
+
+  // Map each aligned-move opcode to its unaligned counterpart; any other
+  // opcode is left untouched.
+  switch (Opc) {
+  default:
+    return false;
+  case X86::MOVAPSmr:
+    NewOpc = X86::MOVUPSmr;
+    break;
+  case X86::MOVAPSrm:
+    NewOpc = X86::MOVUPSrm;
+    break;
+  case X86::VMOVAPSYmr:
+    NewOpc = X86::VMOVUPSYmr;
+    break;
+  case X86::VMOVAPSYrm:
+    NewOpc = X86::VMOVUPSYrm;
+    break;
+  case X86::VMOVAPSZ128mr:
+    NewOpc = X86::VMOVUPSZ128mr;
+    break;
+  case X86::VMOVAPSZ128mr_NOVLX:
+    NewOpc = X86::VMOVUPSZ128mr_NOVLX;
+    break;
+  case X86::VMOVAPSZ128mrk:
+    NewOpc = X86::VMOVUPSZ128mrk;
+    break;
+  case X86::VMOVAPSZ128rm:
+    NewOpc = X86::VMOVUPSZ128rm;
+    break;
+  case X86::VMOVAPSZ128rm_NOVLX:
+    NewOpc = X86::VMOVUPSZ128rm_NOVLX;
+    break;
+  case X86::VMOVAPSZ128rmk:
+    NewOpc = X86::VMOVUPSZ128rmk;
+    break;
+  case X86::VMOVAPSZ128rmkz:
+    NewOpc = X86::VMOVUPSZ128rmkz;
+    break;
+  case X86::VMOVAPSZ256mr:
+    NewOpc = X86::VMOVUPSZ256mr;
+    break;
+  case X86::VMOVAPSZ256mr_NOVLX:
+    NewOpc = X86::VMOVUPSZ256mr_NOVLX;
+    break;
+  case X86::VMOVAPSZ256mrk:
+    NewOpc = X86::VMOVUPSZ256mrk;
+    break;
+  case X86::VMOVAPSZ256rm:
+    NewOpc = X86::VMOVUPSZ256rm;
+    break;
+  case X86::VMOVAPSZ256rm_NOVLX:
+    NewOpc = X86::VMOVUPSZ256rm_NOVLX;
+    break;
+  case X86::VMOVAPSZ256rmk:
+    NewOpc = X86::VMOVUPSZ256rmk;
+    break;
+  case X86::VMOVAPSZ256rmkz:
+    NewOpc = X86::VMOVUPSZ256rmkz;
+    break;
+  case X86::VMOVAPSZmr:
+    NewOpc = X86::VMOVUPSZmr;
+    break;
+  case X86::VMOVAPSZmrk:
+    NewOpc = X86::VMOVUPSZmrk;
+    break;
+  case X86::VMOVAPSZrm:
+    NewOpc = X86::VMOVUPSZrm;
+    break;
+  case X86::VMOVAPSZrmk:
+    NewOpc = X86::VMOVUPSZrmk;
+    break;
+  case X86::VMOVAPSZrmkz:
+    NewOpc = X86::VMOVUPSZrmkz;
+    break;
+  case X86::VMOVAPSmr:
+    NewOpc = X86::VMOVUPSmr;
+    break;
+  case X86::VMOVAPSrm:
+    NewOpc = X86::VMOVUPSrm;
+    break;
+  }
+
+  MI.setDesc(TII->get(NewOpc));
+  return true;
+}
+
+INITIALIZE_PASS(X86UnalignedVectorMovePass, DEBUG_TYPE, UNALIGNED_VEC_MOV_DESC,
+                false, false)
+
+FunctionPass *llvm::createX86UnalignedVectorMoves() {
+  return new X86UnalignedVectorMovePass();
+}
diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll
--- a/llvm/test/CodeGen/X86/O0-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -58,6 +58,7 @@
 ; CHECK-NEXT: Implement the 'patchable-function' attribute
 ; CHECK-NEXT: X86 Indirect Branch Tracking
 ; CHECK-NEXT: X86 vzeroupper inserter
+; CHECK-NEXT: X86 unaligned vector move
 ; CHECK-NEXT: Compressing EVEX instrs to VEX encoding when possible
 ; CHECK-NEXT: X86 Discriminate Memory Operands
 ; CHECK-NEXT: X86 Insert Cache Prefetches
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -186,6 +186,7 @@
 ; CHECK-NEXT: Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT: X86 Atom pad short functions
 ; CHECK-NEXT: X86 LEA Fixup
+; CHECK-NEXT: X86 unaligned vector move
 ; CHECK-NEXT: Compressing EVEX instrs to VEX encoding when possible
 ; CHECK-NEXT: X86 Discriminate Memory Operands
 ; CHECK-NEXT: X86 Insert Cache Prefetches
diff --git a/llvm/test/CodeGen/X86/unaligned-load-store.ll b/llvm/test/CodeGen/X86/unaligned-load-store.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/unaligned-load-store.ll
@@ -0,0 +1,442 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
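+; This test checks that -enable-x86-unaligned-vector-move turns aligned vector
+; moves (movaps/vmovaps) into their unaligned forms (movups/vmovups). The pass
+; only fires on AVX-capable targets, so the SSE-only run lines are expected to
+; keep their aligned moves.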
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2,slow-unaligned-mem-16 -enable-x86-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 -enable-x86-unaligned-vector-move | FileCheck %s
+; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2,slow-unaligned-mem-16 -enable-x86-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_SSE_O0
+; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 -enable-x86-unaligned-vector-move | FileCheck %s -check-prefix=CHECK_O0
+
+define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
+; CHECK_SSE-LABEL: test_256_load:
+; CHECK_SSE: # %bb.0: # %entry
+; CHECK_SSE-NEXT: pushq %r15
+; CHECK_SSE-NEXT: pushq %r14
+; CHECK_SSE-NEXT: pushq %rbx
+; CHECK_SSE-NEXT: subq $96, %rsp
+; CHECK_SSE-NEXT: movq %rdx, %r14
+; CHECK_SSE-NEXT: movq %rsi, %r15
+; CHECK_SSE-NEXT: movq %rdi, %rbx
+; CHECK_SSE-NEXT: movaps (%rdx), %xmm4
+; CHECK_SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE-NEXT: movaps 16(%rdx), %xmm5
+; CHECK_SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE-NEXT: movaps (%rsi), %xmm2
+; CHECK_SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE-NEXT: movaps 16(%rsi), %xmm3
+; CHECK_SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE-NEXT: movaps (%rdi), %xmm0
+; CHECK_SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE-NEXT: movaps 16(%rdi), %xmm1
+; CHECK_SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; CHECK_SSE-NEXT: callq dummy
+; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK_SSE-NEXT: movaps %xmm0, (%rbx)
+; CHECK_SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK_SSE-NEXT: movaps %xmm0, 16(%rbx)
+; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK_SSE-NEXT: movaps %xmm0, (%r15)
+; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK_SSE-NEXT: movaps %xmm0, 16(%r15)
+; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK_SSE-NEXT: movaps %xmm0, (%r14)
+; CHECK_SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK_SSE-NEXT: movaps %xmm0, 16(%r14)
+; CHECK_SSE-NEXT: addq $96, %rsp
+; CHECK_SSE-NEXT: popq %rbx
+; CHECK_SSE-NEXT: popq %r14
+; CHECK_SSE-NEXT: popq %r15
+; CHECK_SSE-NEXT: retq
+;
+; CHECK-LABEL: test_256_load:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $96, %rsp
+; CHECK-NEXT: movq %rdx, %r14
+; CHECK-NEXT: movq %rsi, %r15
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: vmovups (%rdi), %ymm0
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups (%rsi), %ymm1
+; CHECK-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups (%rdx), %ymm2
+; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
+; CHECK-NEXT: callq dummy
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovups %ymm0, (%rbx)
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovups %ymm0, (%r15)
+; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovups %ymm0, (%r14)
+; CHECK-NEXT: addq $96, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; CHECK_SSE_O0-LABEL: test_256_load:
+; CHECK_SSE_O0: # %bb.0: # %entry
+; CHECK_SSE_O0-NEXT: subq $136, %rsp
+; CHECK_SSE_O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK_SSE_O0-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK_SSE_O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK_SSE_O0-NEXT: movapd (%rdi), %xmm0
+; CHECK_SSE_O0-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE_O0-NEXT: movapd 16(%rdi), %xmm1
+; CHECK_SSE_O0-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; CHECK_SSE_O0-NEXT: movaps (%rsi), %xmm2
+; CHECK_SSE_O0-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE_O0-NEXT: movaps 16(%rsi), %xmm3
+; CHECK_SSE_O0-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE_O0-NEXT: movaps (%rdx), %xmm4
+; CHECK_SSE_O0-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE_O0-NEXT: movaps 16(%rdx), %xmm5
+; CHECK_SSE_O0-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK_SSE_O0-NEXT: callq dummy
+; CHECK_SSE_O0-NEXT: movaps (%rsp), %xmm5 # 16-byte Reload
+; CHECK_SSE_O0-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; CHECK_SSE_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; CHECK_SSE_O0-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; CHECK_SSE_O0-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; CHECK_SSE_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; CHECK_SSE_O0-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK_SSE_O0-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK_SSE_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; CHECK_SSE_O0-NEXT: movapd %xmm5, 16(%rdi)
+; CHECK_SSE_O0-NEXT: movapd %xmm4, (%rdi)
+; CHECK_SSE_O0-NEXT: movaps %xmm3, 16(%rsi)
+; CHECK_SSE_O0-NEXT: movaps %xmm2, (%rsi)
+; CHECK_SSE_O0-NEXT: movaps %xmm1, 16(%rdx)
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rdx)
+; CHECK_SSE_O0-NEXT: addq $136, %rsp
+; CHECK_SSE_O0-NEXT: retq
+;
+; CHECK_O0-LABEL: test_256_load:
+; CHECK_O0: # %bb.0: # %entry
+; CHECK_O0-NEXT: subq $184, %rsp
+; CHECK_O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK_O0-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK_O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK_O0-NEXT: vmovapd (%rdi), %ymm0
+; CHECK_O0-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; CHECK_O0-NEXT: vmovups (%rsi), %ymm1
+; CHECK_O0-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK_O0-NEXT: vmovdqa (%rdx), %ymm2
+; CHECK_O0-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK_O0-NEXT: callq dummy
+; CHECK_O0-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
+; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; CHECK_O0-NEXT: vmovapd %ymm2, (%rdi)
+; CHECK_O0-NEXT: vmovups %ymm1, (%rsi)
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rdx)
+; CHECK_O0-NEXT: addq $184, %rsp
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
+entry:
+  %0 = bitcast double* %d to <4 x double>*
+  %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
+  %1 = bitcast float* %f to <8 x float>*
+  %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
+  %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
+  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
+  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
+  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
+  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
+  ret void
+}
+
+declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)
+
+define void @storev16i16(<16 x i16> %a) nounwind {
+; CHECK_SSE-LABEL: storev16i16:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movaps %xmm1, (%rax)
+; CHECK_SSE-NEXT: movaps %xmm0, (%rax)
+;
+; CHECK-LABEL: storev16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovups %ymm0, (%rax)
+;
+; CHECK_SSE_O0-LABEL: storev16i16:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rax)
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movaps %xmm1, (%rax)
+;
+; CHECK_O0-LABEL: storev16i16:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: # implicit-def: $rax
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
+  store <16 x i16> %a, <16 x i16>* undef, align 32
+  unreachable
+}
+
+define void @storev16i16_01(<16 x i16> %a) nounwind {
+; CHECK_SSE-LABEL: storev16i16_01:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movups %xmm1, (%rax)
+; CHECK_SSE-NEXT: movups %xmm0, (%rax)
+;
+; CHECK-LABEL: storev16i16_01:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
+; CHECK-NEXT: vmovups %xmm0, (%rax)
+;
+; CHECK_SSE_O0-LABEL: storev16i16_01:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movups %xmm0, (%rax)
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movups %xmm1, (%rax)
+;
+; CHECK_O0-LABEL: storev16i16_01:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: # implicit-def: $rax
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
+  store <16 x i16> %a, <16 x i16>* undef, align 4
+  unreachable
+}
+
+define void @storev32i8(<32 x i8> %a) nounwind {
+; CHECK_SSE-LABEL: storev32i8:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movaps %xmm1, (%rax)
+; CHECK_SSE-NEXT: movaps %xmm0, (%rax)
+;
+; CHECK-LABEL: storev32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovups %ymm0, (%rax)
+;
+; CHECK_SSE_O0-LABEL: storev32i8:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rax)
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movaps %xmm1, (%rax)
+;
+; CHECK_O0-LABEL: storev32i8:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: # implicit-def: $rax
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
+  store <32 x i8> %a, <32 x i8>* undef, align 32
+  unreachable
+}
+
+define void @storev32i8_01(<32 x i8> %a) nounwind {
+; CHECK_SSE-LABEL: storev32i8_01:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movups %xmm1, (%rax)
+; CHECK_SSE-NEXT: movups %xmm0, (%rax)
+;
+; CHECK-LABEL: storev32i8_01:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
+; CHECK-NEXT: vmovups %xmm0, (%rax)
+;
+; CHECK_SSE_O0-LABEL: storev32i8_01:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movups %xmm0, (%rax)
+; CHECK_SSE_O0-NEXT: # implicit-def: $rax
+; CHECK_SSE_O0-NEXT: movups %xmm1, (%rax)
+;
+; CHECK_O0-LABEL: storev32i8_01:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: # implicit-def: $rax
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
+  store <32 x i8> %a, <32 x i8>* undef, align 4
+  unreachable
+}
+
+; It is faster to make two saves, if the data is already in xmm registers. For
+; example, after making an integer operation.
+define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
+; CHECK_SSE-LABEL: double_save:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE-NEXT: retq
+;
+; CHECK-LABEL: double_save:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
+; CHECK-NEXT: vmovups %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK_SSE_O0-LABEL: double_save:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE_O0-NEXT: retq
+;
+; CHECK_O0-LABEL: double_save:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT: # implicit-def: $ymm0
+; CHECK_O0-NEXT: vmovaps %xmm2, %xmm0
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
+  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i32> %Z, <8 x i32>* %P, align 16
+  ret void
+}
+
+define void @double_save_volatile(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind {
+; CHECK_SSE-LABEL: double_save_volatile:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE-NEXT: retq
+;
+; CHECK-LABEL: double_save_volatile:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: vmovups %ymm0, (%rdi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; CHECK_SSE_O0-LABEL: double_save_volatile:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE_O0-NEXT: retq
+;
+; CHECK_O0-LABEL: double_save_volatile:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT: # implicit-def: $ymm0
+; CHECK_O0-NEXT: vmovaps %xmm2, %xmm0
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
+  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store volatile <8 x i32> %Z, <8 x i32>* %P, align 16
+  ret void
+}
+
+define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
+; CHECK_SSE-LABEL: add8i32:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movups (%rsi), %xmm0
+; CHECK_SSE-NEXT: movups 16(%rsi), %xmm1
+; CHECK_SSE-NEXT: movups %xmm1, 16(%rdi)
+; CHECK_SSE-NEXT: movups %xmm0, (%rdi)
+; CHECK_SSE-NEXT: retq
+;
+; CHECK-LABEL: add8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovups (%rsi), %xmm0
+; CHECK-NEXT: vmovups 16(%rsi), %xmm1
+; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
+; CHECK-NEXT: vmovups %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK_SSE_O0-LABEL: add8i32:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: movups (%rsi), %xmm0
+; CHECK_SSE_O0-NEXT: movups 16(%rsi), %xmm1
+; CHECK_SSE_O0-NEXT: movups %xmm1, 16(%rdi)
+; CHECK_SSE_O0-NEXT: movups %xmm0, (%rdi)
+; CHECK_SSE_O0-NEXT: retq
+;
+; CHECK_O0-LABEL: add8i32:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm2
+; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
+; CHECK_O0-NEXT: # implicit-def: $ymm0
+; CHECK_O0-NEXT: vmovaps %xmm2, %xmm0
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
+  %b = load <8 x i32>, <8 x i32>* %bp, align 1
+  %x = add <8 x i32> zeroinitializer, %b
+  store <8 x i32> %x, <8 x i32>* %ret, align 1
+  ret void
+}
+
+define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+; CHECK_SSE-LABEL: add4i64a64:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movaps (%rsi), %xmm0
+; CHECK_SSE-NEXT: movaps 16(%rsi), %xmm1
+; CHECK_SSE-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE-NEXT: retq
+;
+; CHECK-LABEL: add4i64a64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovups (%rsi), %ymm0
+; CHECK-NEXT: vmovups %ymm0, (%rdi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; CHECK_SSE_O0-LABEL: add4i64a64:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: movaps (%rsi), %xmm0
+; CHECK_SSE_O0-NEXT: movaps 16(%rsi), %xmm1
+; CHECK_SSE_O0-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE_O0-NEXT: retq
+;
+; CHECK_O0-LABEL: add4i64a64:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: vmovups (%rsi), %ymm0
+; CHECK_O0-NEXT: vmovdqa %ymm0, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
+  %b = load <4 x i64>, <4 x i64>* %bp, align 64
+  %x = add <4 x i64> zeroinitializer, %b
+  store <4 x i64> %x, <4 x i64>* %ret, align 64
+  ret void
+}
+
+define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+; CHECK_SSE-LABEL: add4i64a16:
+; CHECK_SSE: # %bb.0:
+; CHECK_SSE-NEXT: movaps (%rsi), %xmm0
+; CHECK_SSE-NEXT: movaps 16(%rsi), %xmm1
+; CHECK_SSE-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE-NEXT: retq
+;
+; CHECK-LABEL: add4i64a16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovups (%rsi), %xmm0
+; CHECK-NEXT: vmovups 16(%rsi), %xmm1
+; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
+; CHECK-NEXT: vmovups %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; CHECK_SSE_O0-LABEL: add4i64a16:
+; CHECK_SSE_O0: # %bb.0:
+; CHECK_SSE_O0-NEXT: movaps (%rsi), %xmm0
+; CHECK_SSE_O0-NEXT: movaps 16(%rsi), %xmm1
+; CHECK_SSE_O0-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK_SSE_O0-NEXT: movaps %xmm0, (%rdi)
+; CHECK_SSE_O0-NEXT: retq
+;
+; CHECK_O0-LABEL: add4i64a16:
+; CHECK_O0: # %bb.0:
+; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm2
+; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
+; CHECK_O0-NEXT: # implicit-def: $ymm0
+; CHECK_O0-NEXT: vmovaps %xmm2, %xmm0
+; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK_O0-NEXT: vmovdqu %ymm0, (%rdi)
+; CHECK_O0-NEXT: vzeroupper
+; CHECK_O0-NEXT: retq
+  %b = load <4 x i64>, <4 x i64>* %bp, align 16
+  %x = add <4 x i64> zeroinitializer, %b
+  store <4 x i64> %x, <4 x i64>* %ret, align 16
+  ret void
+}