diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1196,6 +1196,51 @@
             (ADR_LSL_ZZZ_D_2 $Op1, $Op2)>;
   def : Pat<(nxv2i64 (int_aarch64_sve_adrd nxv2i64:$Op1, nxv2i64:$Op2)),
             (ADR_LSL_ZZZ_D_3 $Op1, $Op2)>;
+
+  // Patterns to generate adr instruction.
+  // adr z0.d, [z0.d, z0.d, uxtw]
+  def : Pat<(add nxv2i64:$Op1,
+                 (nxv2i64 (and nxv2i64:$Op2, (nxv2i64 (AArch64dup (i64 0xFFFFFFFF)))))),
+            (ADR_UXTW_ZZZ_D_0 $Op1, $Op2)>;
+  // adr z0.d, [z0.d, z0.d, sxtw]
+  def : Pat<(add nxv2i64:$Op1,
+                 (nxv2i64 (sext_inreg nxv2i64:$Op2, nxv2i32))),
+            (ADR_SXTW_ZZZ_D_0 $Op1, $Op2)>;
+
+  // adr z0.s, [z0.s, z0.s, lsl #<shift>]
+  // adr z0.d, [z0.d, z0.d, lsl #<shift>]
+  multiclass adrShiftPat<ValueType Ty, ValueType PredTy, ValueType ShiftTy, Instruction DestAdrIns, int ShiftAmt> {
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     Ty:$Op2,
+                                     (Ty (AArch64dup (ShiftTy ShiftAmt)))))),
+              (DestAdrIns $Op1, $Op2)>;
+  }
+  defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_1, 1>;
+  defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_2, 2>;
+  defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_3, 3>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_1, 1>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_2, 2>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_3, 3>;
+
+  // adr z0.d, [z0.d, z0.d, uxtw #<shift>]
+  // adr z0.d, [z0.d, z0.d, sxtw #<shift>]
+  multiclass adrXtwShiftPat<ValueType Ty, ValueType PredTy, int ShiftAmt> {
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     (Ty (and Ty:$Op2, (Ty (AArch64dup (i64 0xFFFFFFFF))))),
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (!cast<Instruction>("ADR_UXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     (Ty (sext_inreg Ty:$Op2, nxv2i32)),
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (!cast<Instruction>("ADR_SXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+  }
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 1>;
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 2>;
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 3>;
 } // End HasSVE
 
 let Predicates = [HasSVEorStreamingSVE] in {
diff --git a/llvm/test/CodeGen/AArch64/sve-adr.ll b/llvm/test/CodeGen/AArch64/sve-adr.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-adr.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ADR
+; Tests adr z0.s, [z0.s, z0.s, lsl #<1,2,3>]
+; Other formats are tested in llvm/test/CodeGen/AArch64/sve-gep.ll
+;
+
+define <vscale x 4 x i32> @adr_32bit_lsl1(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adr z0.s, [z0.s, z1.s, lsl #1]
+; CHECK-NEXT:    ret
+  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
+  %one = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shiftedOffset = shl <vscale x 4 x i32> %idx, %one
+  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  ret <vscale x 4 x i32> %address
+}
+
+define <vscale x 4 x i32> @adr_32bit_lsl2(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adr z0.s, [z0.s, z1.s, lsl #2]
+; CHECK-NEXT:    ret
+  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
+  %two = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shiftedOffset = shl <vscale x 4 x i32> %idx, %two
+  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  ret <vscale x 4 x i32> %address
+}
+
+define <vscale x 4 x i32> @adr_32bit_lsl3(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adr z0.s, [z0.s, z1.s, lsl #3]
+; CHECK-NEXT:    ret
+  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
+  %three = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shiftedOffset = shl <vscale x 4 x i32> %idx, %three
+  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  ret <vscale x 4 x i32> %address
+}
+
+attributes #0 = { "target-features"="+sve" }
diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -87,9 +87,8 @@
 define <vscale x 2 x i16*> @scalable_of_fixed_3_i16(i16* %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl z0.d, z0.d, #1
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
   %d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idx
   ret <vscale x 2 x i16*> %d
@@ -98,9 +97,8 @@
 define <vscale x 2 x i32*> @scalable_of_fixed_3_i32(i32* %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl z0.d, z0.d, #2
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
   %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idx
   ret <vscale x 2 x i32*> %d
@@ -109,9 +107,8 @@
 define <vscale x 2 x i64*> @scalable_of_fixed_3_i64(i64* %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
   %d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idx
   ret <vscale x 2 x i64*> %d
@@ -120,10 +117,8 @@
 define <vscale x 2 x i8*> @scalable_of_fixed_4_i8(i8* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %d = getelementptr i8, i8* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i8*> %d
@@ -132,11 +127,8 @@
 define <vscale x 2 x i16*> @scalable_of_fixed_4_i16(i16* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    lsl z0.d, z0.d, #1
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %d = getelementptr i16, i16* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i16*> %d
@@ -145,11 +137,8 @@
 define <vscale x 2 x i32*> @scalable_of_fixed_4_i32(i32* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    lsl z0.d, z0.d, #2
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %d = getelementptr i32, i32* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i32*> %d
@@ -158,11 +147,8 @@
 define <vscale x 2 x i64*> @scalable_of_fixed_4_i64(i64* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %d = getelementptr i64, i64* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i64*> %d
@@ -172,8 +158,7 @@
 ; CHECK-LABEL: scalable_of_fixed_5:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idxZext
@@ -183,10 +168,8 @@
 define <vscale x 2 x i16*> @scalable_of_fixed_5_i16(i16* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    lsl z0.d, z0.d, #1
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idxZext
@@ -196,10 +179,8 @@
 define <vscale x 2 x i32*> @scalable_of_fixed_5_i32(i32* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    lsl z0.d, z0.d, #2
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idxZext
@@ -210,10 +191,8 @@
 define <vscale x 2 x i64*> @scalable_of_fixed_5_i64(i64* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    lsl z0.d, z0.d, #3
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idxZext