diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1648,6 +1648,9 @@
     def : Pat<(vscale (sve_cntd_imm_neg i32:$imm)), (SUBXrs XZR, (CNTD_XPiI 31, $imm), 0)>;
   }
 
+  def : Pat<(add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm))),
+            (ADDVL_XXI GPR64:$op, $imm)>;
+
   // FIXME: BigEndian requires an additional REV instruction to satisfy the
   // constraint that none of the bits change when stored to memory as one
   // type, and reloaded as another type.
diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
--- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll
+++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -29,27 +29,27 @@
 ; CHECK-NEXT:    lsl x10, x10, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [x9, x10]
-; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    mov w9, #2
 ; CHECK-NEXT:    cmp x8, #2 // =2
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    str q2, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov w9, #4
 ; CHECK-NEXT:    cmp x8, #4 // =4
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #2, mul vl]
 ; CHECK-NEXT:    str q3, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    mov w9, #6
 ; CHECK-NEXT:    cmp x8, #6 // =6
 ; CHECK-NEXT:    csel x8, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    lsl x8, x8, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #3, mul vl]
 ; CHECK-NEXT:    str q4, [x10, x8]
@@ -82,27 +82,27 @@
 ; CHECK-NEXT:    lsl x10, x10, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [x9, x10]
-; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    mov w9, #2
 ; CHECK-NEXT:    cmp x8, #2 // =2
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    str q2, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov w9, #4
 ; CHECK-NEXT:    cmp x8, #4 // =4
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #2, mul vl]
 ; CHECK-NEXT:    str q3, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    mov w9, #6
 ; CHECK-NEXT:    cmp x8, #6 // =6
 ; CHECK-NEXT:    csel x8, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    lsl x8, x8, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #3, mul vl]
 ; CHECK-NEXT:    str q4, [x10, x8]
diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -8,8 +8,7 @@
 define <vscale x 2 x i64>* @scalar_of_scalable_1(<vscale x 2 x i64>* %base) {
 ; CHECK-LABEL: scalar_of_scalable_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #4
-; CHECK-NEXT:    add x0, x0, x8
+; CHECK-NEXT:    addvl x0, x0, #4
 ; CHECK-NEXT:    ret
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 4
   ret <vscale x 2 x i64>* %d
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -21,8 +21,7 @@
 define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
@@ -71,8 +70,7 @@
 define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
@@ -43,8 +43,7 @@
 define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: ld1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -56,8 +55,7 @@
 define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: ld1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
@@ -13,11 +13,9 @@
 define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
@@ -13,11 +13,9 @@
 define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
diff --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
@@ -43,8 +43,7 @@
 define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: st1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x8]
 ; CHECK-NEXT:    ret
@@ -56,8 +55,7 @@
 define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: st1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x8]
 ; CHECK-NEXT:    ret
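
For context, a minimal IR sketch of the fold the new pattern enables (illustrative only: the function name and the constant 32 are not from the patch). An i64 add whose right operand is vscale scaled by a multiple of 16 bytes now selects to a single ADDVL instead of an RDVL/ADD pair:

; Compile with: llc -mtriple=aarch64 -mattr=+sve
declare i64 @llvm.vscale.i64()

define i64 @addvl_sketch(i64 %op) {
  %vs  = call i64 @llvm.vscale.i64()
  %off = mul i64 %vs, 32    ; vscale * 32 == RDVL #2, since VL in bytes is vscale * 16
  %sum = add i64 %op, %off  ; matches (add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm)))
  ret i64 %sum              ; expected codegen: addvl x0, x0, #2
}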