Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12035,7 +12035,8 @@
   SDValue StVal = S->getValue();
   EVT VT = StVal.getValueType();
 
-  if (!VT.isVector())
+  // All SVE vectors should be aligned to 16 bytes
+  if (!VT.isVector() || VT.isScalableVector())
     return SDValue();
 
   // If we get a splat of zeros, convert this vector store to a store of
Index: llvm/test/CodeGen/AArch64/sve-callbyref-notailcall.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-callbyref-notailcall.ll
+++ llvm/test/CodeGen/AArch64/sve-callbyref-notailcall.ll
@@ -1,6 +1,8 @@
 ; Because some arguments are passed by reference (through stack),
 ; the compiler should not do tail-call optimization.
-; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s 2>&1 | FileCheck %s
+
+; CHECK-NOT: warning
 
 ; CHECK-LABEL: caller:
 ; CHECK: addvl sp, sp, #-[[STACKSIZE:[0-9]+]]
Index: llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -1,4 +1,6 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -stop-after=finalize-isel < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -stop-after=finalize-isel < %s 2>&1 | FileCheck %s
+
+; CHECK-NOT: warning
 
 ; Test that z8 and z9, passed in by reference, are correctly loaded from x0 and x1.
 ; i.e. z0 = %z0
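
For reference, a minimal LLVM IR sketch (not part of the patch; the function and value names are hypothetical) of the kind of store affected by the first hunk: a zero-splat store through a scalable vector type, which with the old `if (!VT.isVector())` check would have continued into the zero-splat store combine and now causes the combine to bail out early.

; Illustrative only, assuming an SVE-enabled target.
; Compile with: llc -mtriple=aarch64 -mattr=+sve example.ll
define void @store_zero_splat_sve(<vscale x 4 x i32>* %p) {
  ; Zero splat of a scalable vector type; the DAG combine guarded above
  ; must not rewrite this into fixed-width zero stores.
  store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %p
  ret void
}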