Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1196,6 +1196,51 @@
             (ADR_LSL_ZZZ_D_2 $Op1, $Op2)>;
   def : Pat<(nxv2i64 (int_aarch64_sve_adrd nxv2i64:$Op1, nxv2i64:$Op2)),
             (ADR_LSL_ZZZ_D_3 $Op1, $Op2)>;
+
+  // Patterns to generate adr instruction.
+  // adr z0.d, [z0.d, z0.d, uxtw]
+  def : Pat<(add nxv2i64:$Op1,
+                 (nxv2i64 (and nxv2i64:$Op2, (nxv2i64 (AArch64dup (i64 0xFFFFFFFF)))))),
+            (ADR_UXTW_ZZZ_D_0 $Op1, $Op2)>;
+  // adr z0.d, [z0.d, z0.d, sxtw]
+  def : Pat<(add nxv2i64:$Op1,
+                 (nxv2i64 (sext_inreg nxv2i64:$Op2, nxv2i32))),
+            (ADR_SXTW_ZZZ_D_0 $Op1, $Op2)>;
+
+  // adr z0.s, [z0.s, z0.s, lsl #<shift>]
+  // adr z0.d, [z0.d, z0.d, lsl #<shift>]
+  multiclass adrShiftPat<ValueType Ty, ValueType PredTy, Instruction DestAdrIns, int ShiftAmt> {
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     Ty:$Op2,
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (DestAdrIns $Op1, $Op2)>;
+  }
+  defm : adrShiftPat<nxv2i64, nxv2i1, ADR_LSL_ZZZ_D_1, 1>;
+  defm : adrShiftPat<nxv2i64, nxv2i1, ADR_LSL_ZZZ_D_2, 2>;
+  defm : adrShiftPat<nxv2i64, nxv2i1, ADR_LSL_ZZZ_D_3, 3>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, ADR_LSL_ZZZ_S_1, 1>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, ADR_LSL_ZZZ_S_2, 2>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, ADR_LSL_ZZZ_S_3, 3>;
+
+  // adr z0.d, [z0.d, z0.d, uxtw #<shift>]
+  // adr z0.d, [z0.d, z0.d, sxtw #<shift>]
+  multiclass adrXtwShiftPat<ValueType Ty, ValueType PredTy, int ShiftAmt> {
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     (Ty (and Ty:$Op2, (Ty (AArch64dup (i64 0xFFFFFFFF))))),
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (!cast<Instruction>("ADR_UXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     (Ty (sext_inreg Ty:$Op2, nxv2i32)),
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (!cast<Instruction>("ADR_SXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+  }
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 1>;
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 2>;
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 3>;
 } // End HasSVE
 
 let Predicates = [HasSVEorStreamingSVE] in {
Index: llvm/test/CodeGen/AArch64/sve-gep.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-gep.ll
+++ llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -87,9 +87,8 @@
 define <vscale x 2 x i16*> @scalable_of_fixed_3_i16(i16* %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl z0.d, z0.d, #1
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
   %d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idx
   ret <vscale x 2 x i16*> %d
@@ -98,9 +97,8 @@
 define <vscale x 2 x i32*> @scalable_of_fixed_3_i32(i32* %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl z0.d, z0.d, #2
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
   %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idx
   ret <vscale x 2 x i32*> %d
@@ -109,9 +107,8 @@
 define <vscale x 2 x i64*> @scalable_of_fixed_3_i64(i64* %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
   %d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idx
   ret <vscale x 2 x i64*> %d
@@ -120,10 +117,8 @@
 define <vscale x 2 x i8*> @scalable_of_fixed_4_i8(i8* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %d = getelementptr i8, i8* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i8*> %d
@@ -132,11 +127,8 @@
 define <vscale x 2 x i16*> @scalable_of_fixed_4_i16(i16* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    lsl z0.d, z0.d, #1
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %d = getelementptr i16, i16* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i16*> %d
@@ -145,11 +137,8 @@
 define <vscale x 2 x i32*> @scalable_of_fixed_4_i32(i32* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    lsl z0.d, z0.d, #2
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %d = getelementptr i32, i32* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i32*> %d
@@ -158,11 +147,8 @@
 define <vscale x 2 x i64*> @scalable_of_fixed_4_i64(i64* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %d = getelementptr i64, i64* %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x i64*> %d
@@ -172,8 +158,7 @@
 ; CHECK-LABEL: scalable_of_fixed_5:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idxZext
@@ -183,10 +168,8 @@
 define <vscale x 2 x i16*> @scalable_of_fixed_5_i16(i16* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    lsl z0.d, z0.d, #1
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idxZext
@@ -196,10 +179,8 @@
 define <vscale x 2 x i32*> @scalable_of_fixed_5_i32(i32* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    lsl z0.d, z0.d, #2
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idxZext
@@ -210,10 +191,8 @@
 define <vscale x 2 x i64*> @scalable_of_fixed_5_i64(i64* %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z1.d, x0
-; CHECK-NEXT:    lsl z0.d, z0.d, #3
-; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
   %d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idxZext
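
Note (not part of the patch): a minimal IR sketch of the input the new uxtw #<shift> patterns fold, modeled directly on the scalable_of_fixed_5_* tests above; the function name is hypothetical.

; The zext of the nxv2i32 index legalizes to an AND with 0xffffffff, and the
; getelementptr scaling by 4 to a predicated shift-left by 2; the new TableGen
; patterns combine both with the add into a single
; "adr z0.d, [z1.d, z0.d, uxtw #2]".
define <vscale x 2 x i32*> @adr_uxtw_sketch(i32* %base, <vscale x 2 x i32> %idx) {
  %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
  %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idxZext
  ret <vscale x 2 x i32*> %d
}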