Index: llvm/lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2270,6 +2270,19 @@ case AArch64::LD1SW_D_IMM: case AArch64::LD1D_IMM: + case AArch64::LD2B_IMM: + case AArch64::LD2H_IMM: + case AArch64::LD2W_IMM: + case AArch64::LD2D_IMM: + case AArch64::LD3B_IMM: + case AArch64::LD3H_IMM: + case AArch64::LD3W_IMM: + case AArch64::LD3D_IMM: + case AArch64::LD4B_IMM: + case AArch64::LD4H_IMM: + case AArch64::LD4W_IMM: + case AArch64::LD4D_IMM: + case AArch64::ST1B_IMM: case AArch64::ST1B_H_IMM: case AArch64::ST1B_S_IMM: @@ -2281,6 +2294,19 @@ case AArch64::ST1W_D_IMM: case AArch64::ST1D_IMM: + case AArch64::ST2B_IMM: + case AArch64::ST2H_IMM: + case AArch64::ST2W_IMM: + case AArch64::ST2D_IMM: + case AArch64::ST3B_IMM: + case AArch64::ST3H_IMM: + case AArch64::ST3W_IMM: + case AArch64::ST3D_IMM: + case AArch64::ST4B_IMM: + case AArch64::ST4H_IMM: + case AArch64::ST4W_IMM: + case AArch64::ST4D_IMM: + case AArch64::LD1RB_IMM: case AArch64::LD1RB_H_IMM: case AArch64::LD1RB_S_IMM: @@ -2897,6 +2923,45 @@ MinOffset = -8; MaxOffset = 7; break; + case AArch64::LD2B_IMM: + case AArch64::LD2H_IMM: + case AArch64::LD2W_IMM: + case AArch64::LD2D_IMM: + case AArch64::ST2B_IMM: + case AArch64::ST2H_IMM: + case AArch64::ST2W_IMM: + case AArch64::ST2D_IMM: + Scale = TypeSize::Scalable(32); + Width = SVEMaxBytesPerVector * 2; + MinOffset = -16; + MaxOffset = 14; + break; + case AArch64::LD3B_IMM: + case AArch64::LD3H_IMM: + case AArch64::LD3W_IMM: + case AArch64::LD3D_IMM: + case AArch64::ST3B_IMM: + case AArch64::ST3H_IMM: + case AArch64::ST3W_IMM: + case AArch64::ST3D_IMM: + Scale = TypeSize::Scalable(48); + Width = SVEMaxBytesPerVector * 3; + MinOffset = -24; + MaxOffset = 21; + break; + case AArch64::LD4B_IMM: + case AArch64::LD4H_IMM: + case AArch64::LD4W_IMM: + case AArch64::LD4D_IMM: + case AArch64::ST4B_IMM: + case 
AArch64::ST4H_IMM: + case AArch64::ST4W_IMM: + case AArch64::ST4D_IMM: + Scale = TypeSize::Scalable(64); + Width = SVEMaxBytesPerVector * 4; + MinOffset = -32; + MaxOffset = 28; + break; case AArch64::LD1B_H_IMM: case AArch64::LD1SB_H_IMM: case AArch64::LD1H_S_IMM: Index: llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll @@ -0,0 +1,27 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @st1d_fixed(<8 x double>* %ptr) #0 { +; CHECK-LABEL: st1d_fixed: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: add x8, sp, #8 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld2d { z0.d, z1.d }, p0/z, [x8] +; CHECK-NEXT: mov x8, #4 +; CHECK-NEXT: mov z0.d, #0 // =0x0 +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %alloc = alloca [16 x double], i32 0 + %bc = bitcast [16 x double]* %alloc to <8 x double>* + %load = load <8 x double>, <8 x double>* %bc + %strided.vec = shufflevector <8 x double> %load, <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + store <8 x double> zeroinitializer, <8 x double>* %ptr + ret void +} + +attributes #0 = { "target-features"="+sve" vscale_range(2,2) nounwind } Index: llvm/test/CodeGen/AArch64/sve-ldN.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-ldN.mir @@ -0,0 +1,171 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -run-pass=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s + +--- | + define void @testcase_valid_offset() nounwind { entry: unreachable } + define void @testcase_offset_out_of_range() nounwind { entry: unreachable }
+... +--- +name: testcase_valid_offset +tracksRegLiveness: true +stack: + - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector } +body: | + bb.0: + liveins: $p0 + + ; CHECK-LABEL: name: testcase_valid_offset + ; CHECK: liveins: $p0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1) + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 + ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, $sp, -16 + ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, $sp, 14 + ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, $sp, -16 + ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, $sp, 14 + ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, $sp, -16 + ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, $sp, 14 + ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, $sp, -16 + ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, $sp, 14 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, $sp, -24 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, $sp, 21 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, $sp, -24 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, $sp, 21 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, $sp, -24 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, $sp, 21 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, $sp, -24 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, $sp, 21 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, $sp, -32 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, $sp, 28 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, $sp, -32 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, $sp, 28 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, $sp, -32 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, 
$sp, 28 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, $sp, -32 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, $sp, 28 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1 + ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1) + ; CHECK-NEXT: RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3 + renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -16 + renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 14 + renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -16 + renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 14 + renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -16 + renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 14 + renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -16 + renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 14 + + renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, -24 + renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, 21 + renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, -24 + renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, 21 + renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, -24 + renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, 21 + renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, -24 + renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, 21 + + renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, -32 + renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, 28 + renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, -32 + renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, 28 + renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, -32 + renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, 28 + renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, -32 + renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, 28 + RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3 
+... +--- +name: testcase_offset_out_of_range +tracksRegLiveness: true +stack: + - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector } +body: | + bb.0: + liveins: $p0 + + ; CHECK-LABEL: name: testcase_offset_out_of_range + ; CHECK: liveins: $p0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1) + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = 
ADDVL_XXI $sp, 9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 9 + ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1 + ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1) + ; CHECK-NEXT: RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3 + renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -18 + renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 16 + renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -18 + renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 16 + renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -18 + renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 16 + renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -18 
+ renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 16 + + renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, -27 + renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, 24 + renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, -27 + renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, 24 + renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, -27 + renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, 24 + renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, -27 + renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, 24 + + renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, -36 + renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, 32 + renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, -36 + renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, 32 + renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, -36 + renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, 32 + renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, -36 + renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, 32 + RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3 +... Index: llvm/test/CodeGen/AArch64/sve-stN.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-stN.mir @@ -0,0 +1,171 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -run-pass=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s + +--- | + define void @testcase_valid_offset() nounwind { entry: unreachable } + define void @testcase_offset_out_of_range() nounwind { entry: unreachable } +... 
+--- +name: testcase_valid_offset +tracksRegLiveness: true +stack: + - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector } +body: | + bb.0: + liveins: $p0, $z0 + + ; CHECK-LABEL: name: testcase_valid_offset + ; CHECK: liveins: $p0, $z0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1) + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 + ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, $sp, -16 + ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, $sp, 14 + ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, $sp, -16 + ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, $sp, 14 + ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, $sp, -16 + ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, $sp, 14 + ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, $sp, -16 + ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, $sp, 14 + ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, $sp, -24 + ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, $sp, 21 + ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, $sp, -24 + ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, $sp, 21 + ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, $sp, -24 + ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, $sp, 21 + ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, $sp, -24 + ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, $sp, 21 + ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -32 + ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 28 + ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -32 + ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 28 + ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -32 + ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 28 + ; 
CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -32 + ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 28 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1 + ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1) + ; CHECK-NEXT: RET_ReallyLR + ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, -16 + ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, 14 + ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, -16 + ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, 14 + ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, -16 + ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, 14 + ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, -16 + ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, 14 + + ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -24 + ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 21 + ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -24 + ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 21 + ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -24 + ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 21 + ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -24 + ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 21 + + ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -32 + ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 28 + ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -32 + ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 28 + ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -32 + ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 28 + ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -32 + ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 28 + RET_ReallyLR +... 
+--- +name: testcase_offset_out_of_range +tracksRegLiveness: true +stack: + - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector } +body: | + bb.0: + liveins: $p0, $z0 + + ; CHECK-LABEL: name: testcase_offset_out_of_range + ; CHECK: liveins: $p0, $z0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1) + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4 + ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, killed $x8, -16 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4 + ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, killed $x8, 14 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 9 + ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 9 + ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI 
$sp, 9 + ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -9 + ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -24 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 9 + ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 21 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -16 + ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -32 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 16 + ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 28 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1 + ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1) + ; CHECK-NEXT: RET_ReallyLR + ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, -18 + ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, 16 + ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, -18 + ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, 16 + ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, -18 + ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, 16 + ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, -18 + ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, 16 + + ST3B_IMM renamable 
$z0_z1_z2, renamable $p0, %stack.0, -27 + ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 24 + ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -27 + ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 24 + ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -27 + ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 24 + ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -27 + ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 24 + + ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -36 + ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 32 + ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -36 + ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 32 + ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -36 + ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 32 + ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -36 + ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 32 + RET_ReallyLR +...