diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -17325,11 +17325,12 @@
             !ST1->getBasePtr().isUndef() &&
             // BaseIndexOffset and the code below requires knowing the size
             // of a vector, so bail out if MemoryVT is scalable.
+            !ST->getMemoryVT().isScalableVector() &&
             !ST1->getMemoryVT().isScalableVector()) {
           const BaseIndexOffset STBase = BaseIndexOffset::match(ST, DAG);
           const BaseIndexOffset ChainBase = BaseIndexOffset::match(ST1, DAG);
-          unsigned STBitSize = ST->getMemoryVT().getSizeInBits();
-          unsigned ChainBitSize = ST1->getMemoryVT().getSizeInBits();
+          unsigned STBitSize = ST->getMemoryVT().getFixedSizeInBits();
+          unsigned ChainBitSize = ST1->getMemoryVT().getFixedSizeInBits();
           // If this is a store who's preceding store to a subset of the current
           // location and no one other node is chained to that store we can
           // effectively drop the store. Do not remove stores to undef as they may
diff --git a/llvm/test/CodeGen/AArch64/sve-redundant-store.ll b/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
@@ -0,0 +1,24 @@
+; RUN: llc -O2 -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning: {{.*}}TypeSize is not scalable
+
+; #include <stdint.h>
+; #include <arm_sve.h>
+;
+; void redundant_store(uint32_t *x) {
+;   *x = 1;
+;   *(svint32_t *)x = svdup_s32(0);
+; }
+
+; CHECK-LABEL: @redundant_store
+define void @redundant_store(i32* nocapture %x) {
+  %1 = bitcast i32* %x to <vscale x 4 x i32>*
+  store i32 1, i32* %x, align 4
+  %2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 0)
+  store <vscale x 4 x i32> %2, <vscale x 4 x i32>* %1, align 16
+  ret void
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)