diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -401,8 +401,10 @@
   /// efficiently, casting the load to a smaller vector of larger types and
   /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
-                                       EVT BitcastVT) const {
+  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
+                                       bool &CheckAlignment) const {
+    CheckAlignment = true;
+
     // Don't do if we could do an indexed load on the original type, but not on
     // the new one.
     if (!LoadVT.isSimple() || !BitcastVT.isSimple())
@@ -421,9 +423,10 @@
 
   /// Return true if the following transform is beneficial:
   /// (store (y (conv x)), y*)) -> (store x, (x*))
-  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
+  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
+                                        bool &CheckAlignment) const {
     // Default to the same logic as loads.
-    return isLoadBitCastBeneficial(StoreVT, BitcastVT);
+    return isLoadBitCastBeneficial(StoreVT, BitcastVT, CheckAlignment);
   }
 
   /// Return true if it is expected to be cheaper to do a store of a non-zero
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11040,14 +11040,15 @@
       // as we assume software couldn't rely on the number of accesses of an
      // illegal type.
       ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isOperationLegal(ISD::LOAD, VT)) &&
-      TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
+       TLI.isOperationLegal(ISD::LOAD, VT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
+    bool CheckAlignment = true;
     bool Fast = false;
-    if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                               *LN0->getMemOperand(), &Fast) &&
-        Fast) {
+    if (TLI.isLoadBitCastBeneficial(N0.getValueType(), VT, CheckAlignment) &&
+        (!CheckAlignment ||
+         (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+                                 *LN0->getMemOperand(), &Fast) && Fast))) {
       SDValue Load =
           DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
                       LN0->getPointerInfo(), LN0->getAlignment(),
@@ -16173,12 +16174,14 @@
     // as we assume software couldn't rely on the number of accesses of an
     // illegal type.
     if (((!LegalOperations && !ST->isVolatile()) ||
-         TLI.isOperationLegal(ISD::STORE, SVT)) &&
-        TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) {
+         TLI.isOperationLegal(ISD::STORE, SVT))) {
+      bool CheckAlignment = true;
       bool Fast = false;
-      if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
-                                 *ST->getMemOperand(), &Fast) &&
-          Fast) {
+      if (TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT,
+                                       CheckAlignment) &&
+          (!CheckAlignment ||
+           (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
+                                   *ST->getMemOperand(), &Fast) && Fast))) {
         return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
                             ST->getPointerInfo(), ST->getAlignment(),
                             ST->getMemOperand()->getFlags(), ST->getAAInfo());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -182,7 +182,7 @@
                              ISD::LoadExtType ExtType,
                              EVT ExtVT) const override;
 
-  bool isLoadBitCastBeneficial(EVT, EVT) const final;
+  bool isLoadBitCastBeneficial(EVT, EVT, bool &CheckAlignment) const final;
 
   bool storeOfVectorConstantIsCheap(EVT MemVT,
                                     unsigned NumElem,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -719,8 +719,9 @@
   return (OldSize < 32);
 }
 
-bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
-                                                   EVT CastTy) const {
+bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
+                                                   bool &CheckAlignment) const {
+  CheckAlignment = true;
 
   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1127,7 +1127,8 @@
       return NumElem > 2;
     }
 
-    bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT) const override;
+    bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
+                                 bool &CheckAlignment) const override;
 
     /// Intel processors have a unified instruction and data cache
     const char * getClearCacheBuiltinName() const override {
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4941,8 +4941,8 @@
   return Subtarget.hasLZCNT();
 }
 
-bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT,
-                                                EVT BitcastVT) const {
+bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
+                                                bool &CheckAlignment) const {
   if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
       BitcastVT.getVectorElementType() == MVT::i1)
     return false;
@@ -4950,7 +4950,14 @@
   if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
     return false;
 
-  return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT);
+  if (LoadVT.isVector() && BitcastVT.isVector() &&
+      isTypeLegal(LoadVT) && isTypeLegal(BitcastVT)) {
+    CheckAlignment = false;
+    return true;
+  }
+
+  return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT,
+                                                 CheckAlignment);
 }
 
 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll b/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
--- a/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
@@ -306,27 +306,25 @@
 ; X86-SSE2-NEXT:    movdqu 16(%ecx), %xmm1
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, (%eax)
-; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,1],xmm0[2,3]
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
 ; X86-SSE2-NEXT:    movd %xmm2, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 12(%eax)
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movd %xmm2, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 8(%eax)
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 4(%eax)
 ; X86-SSE2-NEXT:    movd %xmm1, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 16(%eax)
-; X86-SSE2-NEXT:    movdqa %xmm1, %xmm0
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,3]
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 28(%eax)
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 24(%eax)
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; X86-SSE2-NEXT:    movd %xmm1, %ecx
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 20(%eax)
 ; X86-SSE2-NEXT:    retl
 ;
@@ -421,27 +419,25 @@
 ; X86-SSE2-NEXT:    movdqu 16(%ecx), %xmm1
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, (%eax)
-; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,1],xmm0[2,3]
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
 ; X86-SSE2-NEXT:    movd %xmm2, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 12(%eax)
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movd %xmm2, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 8(%eax)
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 4(%eax)
 ; X86-SSE2-NEXT:    movd %xmm1, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 16(%eax)
-; X86-SSE2-NEXT:    movdqa %xmm1, %xmm0
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,3]
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 28(%eax)
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 24(%eax)
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; X86-SSE2-NEXT:    movd %xmm1, %ecx
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X86-SSE2-NEXT:    movd %xmm0, %ecx
 ; X86-SSE2-NEXT:    movntil %ecx, 20(%eax)
 ; X86-SSE2-NEXT:    retl
 ;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2441,8 +2441,7 @@
 define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
 ; SSE-LABEL: shuffle_mem_v4f32_0145:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movups (%rdi), %xmm1
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_mem_v4f32_0145:
@@ -2457,20 +2456,17 @@
 define <4 x float> @shuffle_mem_v4f32_4523(<4 x float> %a, <4 x float>* %pb) {
 ; SSE2-LABEL: shuffle_mem_v4f32_4523:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movupd (%rdi), %xmm1
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: shuffle_mem_v4f32_4523:
 ; SSE3:       # %bb.0:
-; SSE3-NEXT:    movupd (%rdi), %xmm1
-; SSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuffle_mem_v4f32_4523:
 ; SSSE3:       # %bb.0:
-; SSSE3-NEXT:    movupd (%rdi), %xmm1
-; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuffle_mem_v4f32_4523:
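
Note (illustration only, not part of the patch): under the new signature, the hook reports two things: whether folding the bitcast into the memory access is worthwhile at all, and, through the CheckAlignment out-parameter, whether the DAG combiner must still prove via allowsMemoryAccess() that the new type is fast at the original alignment. The sketch below shows how a hypothetical out-of-tree target (MyTargetLowering is an assumed name) could opt out of the alignment check for legal vector-to-vector casts, mirroring the X86 override above; it assumes the usual llvm/CodeGen/TargetLowering.h declarations.

  // Hypothetical override, mirroring the X86 change in this patch.
  // Returning true after setting CheckAlignment to false tells the combiner it
  // may rewrite (bitcast (load x)) -> (load (bitcast*)x) without re-querying
  // allowsMemoryAccess() for the new type.
  bool MyTargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                                 bool &CheckAlignment) const {
    // A legal vector-to-vector cast reuses the same underlying load, so the
    // access pattern (and therefore its alignment requirement) is unchanged.
    if (LoadVT.isVector() && BitcastVT.isVector() &&
        isTypeLegal(LoadVT) && isTypeLegal(BitcastVT)) {
      CheckAlignment = false;
      return true;
    }

    // Otherwise fall back to the default, which sets CheckAlignment = true.
    return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT,
                                                   CheckAlignment);
  }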