diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13966,10 +13966,11 @@
   if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
     return false;
 
-  if (Subtarget->useSVEForFixedLengthVectors() &&
-      (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
-       (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
-        isPowerOf2_32(NumElements) && VecSize > 128))) {
+  if (Subtarget->forceStreamingCompatibleSVE() ||
+      (Subtarget->useSVEForFixedLengthVectors() &&
+       (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
+        (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
+         isPowerOf2_32(NumElements) && VecSize > 128)))) {
     UseScalable = true;
     return true;
   }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
@@ -8,23 +8,31 @@
 define void @st1d_fixed(ptr %st_ptr) #0 {
 ; CHECK-LABEL: st1d_fixed:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #160
-; CHECK-NEXT:    .cfi_def_cfa_offset 160
-; CHECK-NEXT:    str x30, [sp, #128] // 8-byte Folded Spill
-; CHECK-NEXT:    stp x20, x19, [sp, #144] // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_offset w19, -8
-; CHECK-NEXT:    .cfi_offset w20, -16
-; CHECK-NEXT:    .cfi_offset w30, -32
+; CHECK-NEXT:    str x29, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    sub sp, sp, #128
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    mov x0, sp
-; CHECK-NEXT:    mov x20, sp
 ; CHECK-NEXT:    bl def
-; CHECK-NEXT:    ld2 { v0.2d, v1.2d }, [x20], #32
-; CHECK-NEXT:    ldr x30, [sp, #128] // 8-byte Folded Reload
-; CHECK-NEXT:    ld2 { v2.2d, v3.2d }, [x20]
+; CHECK-NEXT:    cntd x8
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    sub x8, x8, #2
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [sp]
+; CHECK-NEXT:    mov w9, #2
+; CHECK-NEXT:    cmp x8, #2
+; CHECK-NEXT:    csel x8, x8, x9, lo
+; CHECK-NEXT:    add x10, sp, #128
+; CHECK-NEXT:    lsl x8, x8, #3
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    add x9, sp, #128
+; CHECK-NEXT:    st1d { z0.d }, p0, [x10]
+; CHECK-NEXT:    ldr q2, [x9, x8]
 ; CHECK-NEXT:    stp q0, q2, [x19]
-; CHECK-NEXT:    ldp x20, x19, [sp, #144] // 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #160
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    add sp, sp, #128
+; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x29, [sp], #32 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %alloc = alloca [16 x double]
   call void @def(ptr %alloc)
@@ -34,4 +42,4 @@
   ret void
 }
 
-attributes #0 = { "target-features"="+sve" }
+attributes #0 = { "target-features"="+sve" nounwind}