Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1568,72 +1568,90 @@
   defm Pat_Load_P4 : unpred_load_predicate;
   defm Pat_Load_P2 : unpred_load_predicate;
 
-  multiclass ld1 {
-    // scalar + immediate (mul vl)
+  multiclass ld1 {
+    // reg + reg
     let AddedComplexity = 1 in {
+      def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
+                (RegRegInst PPR:$gp, GPR64sp:$base, GPR64:$offset)>;
+    }
+
+    // scalar + immediate (mul vl)
+    let AddedComplexity = 2 in {
       def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
-                (I PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
+                (RegImmInst PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
     }
 
     // base
     def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
-              (I PPR:$gp, GPR64sp:$base, (i64 0))>;
+              (RegImmInst PPR:$gp, GPR64sp:$base, (i64 0))>;
   }
 
   // 2-element contiguous loads
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
 
   // 4-element contiguous loads
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
 
   // 8-element contiguous loads
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
+  defm : ld1;
 
   // 16-element contiguous loads
-  defm : ld1;
+  defm : ld1;
+  multiclass ldnf1 {
+    // scalar + immediate (mul vl)
+    let AddedComplexity = 1 in {
+      def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
+                (I PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
+    }
+
+    // base
+    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
+              (I PPR:$gp, GPR64sp:$base, (i64 0))>;
+  }
 
   // 2-element contiguous non-faulting loads
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
 
   // 4-element contiguous non-faulting loads
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
 
   // 8-element contiguous non-faulting loads
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
-  defm : ld1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
+  defm : ldnf1;
 
   // 16-element contiguous non-faulting loads
-  defm : ld1;
+  defm : ldnf1;
 
   multiclass ldff1 {
     // reg + reg
@@ -1675,35 +1693,42 @@
   // 16-element contiguous first faulting loads
   defm : ldff1;
 
-  multiclass st1 {
-    // scalar + immediate (mul vl)
+  multiclass st1 {
+    // reg + reg
     let AddedComplexity = 1 in {
+      def : Pat<(Store (Ty ZPR:$vec), (AddrCP GPR64:$base, GPR64:$offset), (PredTy PPR:$gp), MemVT),
+                (RegRegInst ZPR:$vec, PPR:$gp, GPR64sp:$base, GPR64:$offset)>;
+    }
+
+    // scalar + immediate (mul vl)
+    let AddedComplexity = 2 in {
       def : Pat<(Store (Ty ZPR:$vec), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), (PredTy PPR:$gp), MemVT),
-                (I ZPR:$vec, PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
+                (RegImmInst ZPR:$vec, PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
     }
 
     // base
     def : Pat<(Store (Ty ZPR:$vec), GPR64:$base, (PredTy PPR:$gp), MemVT),
-              (I ZPR:$vec, PPR:$gp, GPR64:$base, (i64 0))>;
+              (RegImmInst ZPR:$vec, PPR:$gp, GPR64:$base, (i64 0))>;
   }
 
   // 2-element contiguous store
-  defm : st1;
-  defm : st1;
-  defm : st1;
-  defm : st1;
+  defm : st1;
+  defm : st1;
+  defm : st1;
+  defm : st1;
 
   // 4-element contiguous store
-  defm : st1;
-  defm : st1;
-  defm : st1;
+  defm : st1;
+  defm : st1;
+  defm : st1;
 
   // 8-element contiguous store
-  defm : st1;
-  defm : st1;
+  defm : st1;
+  defm : st1;
 
   // 16-element contiguous store
-  defm : st1;
+  defm : st1;
 }
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
@@ -0,0 +1,301 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_upper_bound:
+; CHECK: ld1b { z0.b }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_inbound:
+; CHECK: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 4 x i32> @ld1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_s_inbound:
+; CHECK: ld1b { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ld1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1sb_s_inbound:
+; CHECK: ld1sb { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_lower_bound:
+; CHECK: ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_out_of_upper_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_out_of_lower_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i16> @ld1b_h_inbound(<vscale x 8 x i1> %pg,
i8* %a) { +; CHECK-LABEL: ld1b_h_inbound: +; CHECK: ld1b { z0.h }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pg, i8* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sb_h_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1sb_h_inbound: +; CHECK: ld1sb { z0.h }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pg, i8* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1h_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1h_inbound: +; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv8i16( %pg, i16* %base_scalar) + ret %load +} + +define @ld1h_s_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1h_s_inbound: +; CHECK: ld1h { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pg, i16* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sh_s_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1sh_s_inbound: +; CHECK: ld1sh { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pg, i16* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1b_d_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_d_inbound: +; CHECK: ld1b { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pg, i8* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sb_d_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1sb_d_inbound: +; CHECK: ld1sb { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pg, i8* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1h_d_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1h_d_inbound: +; CHECK: ld1h { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pg, i16* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sh_d_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1sh_d_inbound: +; CHECK: ld1sh { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pg, i16* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1h_f16_inbound( %pg, half* %a) { +; CHECK-LABEL: ld1h_f16_inbound: +; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast half* %a to * + %base = 
getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to half* + %load = call @llvm.aarch64.sve.ld1.nxv8f16( %pg, half* %base_scalar) + ret %load +} + +; +; LD1W +; + +define @ld1w_inbound( %pg, i32* %a) { +; CHECK-LABEL: ld1w_inbound: +; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i32* + %load = call @llvm.aarch64.sve.ld1.nxv4i32( %pg, i32* %base_scalar) + ret %load +} + +define @ld1w_f32_inbound( %pg, float* %a) { +; CHECK-LABEL: ld1w_f32_inbound: +; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast float* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to float* + %load = call @llvm.aarch64.sve.ld1.nxv4f32( %pg, float* %base_scalar) + ret %load +} + +; +; LD1D +; + +define @ld1d_inbound( %pg, i64* %a) { +; CHECK-LABEL: ld1d_inbound: +; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i64* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i64* + %load = call @llvm.aarch64.sve.ld1.nxv2i64( %pg, i64* %base_scalar) + ret %load +} + +define @ld1w_d_inbound( %pg, i32* %a) { +; CHECK-LABEL: ld1w_d_inbound: +; CHECK: ld1w { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i32* + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pg, i32* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sw_d_inbound( %pg, i32* %a) { +; CHECK-LABEL: ld1sw_d_inbound: +; CHECK: ld1sw { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i32* + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pg, i32* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1d_f64_inbound( %pg, double* %a) { +; CHECK-LABEL: ld1d_f64_inbound: +; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast double* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to double* + %load = call @llvm.aarch64.sve.ld1.nxv2f64( %pg, double* %base_scalar) + ret %load +} + +declare @llvm.aarch64.sve.ld1.nxv16i8(, i8*) + +declare @llvm.aarch64.sve.ld1.nxv8i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv8i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv8f16(, half*) + +declare @llvm.aarch64.sve.ld1.nxv4i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv4i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv4i32(, i32*) +declare @llvm.aarch64.sve.ld1.nxv4f32(, float*) + +declare @llvm.aarch64.sve.ld1.nxv2i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv2i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv2i32(, i32*) +declare @llvm.aarch64.sve.ld1.nxv2i64(, i64*) +declare @llvm.aarch64.sve.ld1.nxv2f64(, double*) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll @@ -0,0 +1,217 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; LD1B +; + +define @ld1b_i8( %pg, i8* %a, i64 %index) { +; CHECK-LABEL: ld1b_i8 +; CHECK: ld1b { z0.b }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + 
%load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base) + ret %load +} + +define @ld1b_h( %pred, i8* %a, i64 %index) { +; CHECK-LABEL: ld1b_h: +; CHECK: ld1b { z0.h }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pred, i8* %base) + %res = zext %load to + ret %res +} + +define @ld1sb_h( %pred, i8* %a, i64 %index) { +; CHECK-LABEL: ld1sb_h: +; CHECK: ld1sb { z0.h }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pred, i8* %base) + %res = sext %load to + ret %res +} + +define @ld1b_s( %pred, i8* %a, i64 %index) { +; CHECK-LABEL: ld1b_s: +; CHECK: ld1b { z0.s }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pred, i8* %base) + %res = zext %load to + ret %res +} + +define @ld1sb_s( %pred, i8* %a, i64 %index) { +; CHECK-LABEL: ld1sb_s: +; CHECK: ld1sb { z0.s }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pred, i8* %base) + %res = sext %load to + ret %res +} + +define @ld1b_d( %pred, i8* %a, i64 %index) { +; CHECK-LABEL: ld1b_d: +; CHECK: ld1b { z0.d }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pred, i8* %base) + %res = zext %load to + ret %res +} + +define @ld1sb_d( %pred, i8* %a, i64 %index) { +; CHECK-LABEL: ld1sb_d: +; CHECK: ld1sb { z0.d }, p0/z, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pred, i8* %base) + %res = sext %load to + ret %res +} + +; +; LD1H +; + +define @ld1h_i16( %pg, i16* %a, i64 %index) { +; CHECK-LABEL: ld1h_i16 +; CHECK: ld1h { z0.h }, p0/z, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr i16, i16* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv8i16( %pg, i16* %base) + ret %load +} + +define @ld1h_f16( %pg, half* %a, i64 %index) { +; CHECK-LABEL: ld1h_f16 +; CHECK: ld1h { z0.h }, p0/z, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr half, half* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv8f16( %pg, half* %base) + ret %load +} + +define @ld1h_s( %pred, i16* %a, i64 %index) { +; CHECK-LABEL: ld1h_s: +; CHECK: ld1h { z0.s }, p0/z, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr i16, i16* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pred, i16* %base) + %res = zext %load to + ret %res +} + +define @ld1sh_s( %pred, i16* %a, i64 %index) { +; CHECK-LABEL: ld1sh_s: +; CHECK: ld1sh { z0.s }, p0/z, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr i16, i16* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pred, i16* %base) + %res = sext %load to + ret %res +} + +define @ld1h_d( %pred, i16* %a, i64 %index) { +; CHECK-LABEL: ld1h_d: +; CHECK: ld1h { z0.d }, p0/z, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr i16, i16* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pred, i16* %base) + %res = zext %load to + ret %res +} + +define @ld1sh_d( %pred, i16* %a, i64 %index) { +; CHECK-LABEL: ld1sh_d: +; CHECK: ld1sh { z0.d }, p0/z, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr i16, i16* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pred, i16* %base) + %res = sext %load to + ret %res +} + +; +; LD1W +; + +define @ld1w( %pg, i32* %a, i64 %index) { +; CHECK-LABEL: 
ld1w +; CHECK: ld1w { z0.s }, p0/z, [x0, x1, lsl #2] +; CHECK-NEXT: ret + %base = getelementptr i32, i32* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv4i32( %pg, i32* %base) + ret %load +} + +define @ld1w_f32( %pg, float* %a, i64 %index) { +; CHECK-LABEL: ld1w_f32 +; CHECK: ld1w { z0.s }, p0/z, [x0, x1, lsl #2] +; CHECK-NEXT: ret + %base = getelementptr float, float* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv4f32( %pg, float* %base) + ret %load +} + +define @ld1w_d( %pred, i32* %a, i64 %index) { +; CHECK-LABEL: ld1w_d: +; CHECK: ld1w { z0.d }, p0/z, [x0, x1, lsl #2] +; CHECK-NEXT: ret + %base = getelementptr i32, i32* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pred, i32* %base) + %res = zext %load to + ret %res +} + +define @ld1sw_d( %pred, i32* %a, i64 %index) { +; CHECK-LABEL: ld1sw_d: +; CHECK: ld1sw { z0.d }, p0/z, [x0, x1, lsl #2] +; CHECK-NEXT: ret + %base = getelementptr i32, i32* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pred, i32* %base) + %res = sext %load to + ret %res +} + +; +; LD1D +; + +define @ld1d( %pg, i64* %a, i64 %index) { +; CHECK-LABEL: ld1d +; CHECK: ld1d { z0.d }, p0/z, [x0, x1, lsl #3] +; CHECK-NEXT: ret + %base = getelementptr i64, i64* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2i64( %pg, i64* %base) + ret %load +} + +define @ld1d_f64( %pg, double* %a, i64 %index) { +; CHECK-LABEL: ld1d_f64 +; CHECK: ld1d { z0.d }, p0/z, [x0, x1, lsl #3] +; CHECK-NEXT: ret + %base = getelementptr double, double* %a, i64 %index + %load = call @llvm.aarch64.sve.ld1.nxv2f64( %pg, double* %base) + ret %load +} + +declare @llvm.aarch64.sve.ld1.nxv16i8(, i8*) + +declare @llvm.aarch64.sve.ld1.nxv8i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv8i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv8f16(, half*) + +declare @llvm.aarch64.sve.ld1.nxv4i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv4i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv4i32(, i32*) +declare @llvm.aarch64.sve.ld1.nxv4f32(, float*) + +declare @llvm.aarch64.sve.ld1.nxv2i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv2i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv2i32(, i32*) +declare @llvm.aarch64.sve.ld1.nxv2i64(, i64*) +declare @llvm.aarch64.sve.ld1.nxv2f64(, double*) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll @@ -66,89 +66,6 @@ ret %res } -define @ld1b_upper_bound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_upper_bound: -; CHECK: ld1b { z0.b }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) - ret %load -} - -define @ld1b_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_inbound: -; CHECK: ld1b { z0.b }, p0/z, [x0, #1, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 1 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) - ret %load -} - -define @ld1b_s_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_s_inbound: -; CHECK: ld1b { z0.s }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pg, i8* %base_scalar) - %res = zext %load 
to - ret %res -} - -define @ld1sb_s_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1sb_s_inbound: -; CHECK: ld1sb { z0.s }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pg, i8* %base_scalar) - %res = sext %load to - ret %res -} - -define @ld1b_lower_bound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_lower_bound: -; CHECK: ld1b { z0.b }, p0/z, [x0, #-8, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 -8 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) - ret %load -} - -define @ld1b_out_of_upper_bound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_out_of_upper_bound: -; CHECK: rdvl x[[OFFSET:[0-9]+]], #8 -; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]] -; CHECK-NEXT: ld1b { z0.b }, p0/z, [x[[BASE]]] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 8 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) - ret %load -} - -define @ld1b_out_of_lower_bound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_out_of_lower_bound: -; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9 -; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]] -; CHECK-NEXT: ld1b { z0.b }, p0/z, [x[[BASE]]] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 -9 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) - ret %load -} - ; ; LD1H ; @@ -205,124 +122,6 @@ ret %res } -define @ld1b_h_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_h_inbound: -; CHECK: ld1b { z0.h }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pg, i8* %base_scalar) - %res = zext %load to - ret %res -} - -define @ld1sb_h_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1sb_h_inbound: -; CHECK: ld1sb { z0.h }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pg, i8* %base_scalar) - %res = sext %load to - ret %res -} - -define @ld1h_inbound( %pg, i16* %a) { -; CHECK-LABEL: ld1h_inbound: -; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i16* %a to * - %base = getelementptr , * %base_scalable, i64 1 - %base_scalar = bitcast * %base to i16* - %load = call @llvm.aarch64.sve.ld1.nxv8i16( %pg, i16* %base_scalar) - ret %load -} - -define @ld1h_s_inbound( %pg, i16* %a) { -; CHECK-LABEL: ld1h_s_inbound: -; CHECK: ld1h { z0.s }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i16* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i16* - %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pg, i16* %base_scalar) - %res = zext %load to - ret %res -} - -define @ld1sh_s_inbound( %pg, i16* %a) { -; CHECK-LABEL: ld1sh_s_inbound: -; CHECK: ld1sh { z0.s }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i16* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i16* - %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pg, i16* 
%base_scalar) - %res = sext %load to - ret %res -} - -define @ld1b_d_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1b_d_inbound: -; CHECK: ld1b { z0.d }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pg, i8* %base_scalar) - %res = zext %load to - ret %res -} - -define @ld1sb_d_inbound( %pg, i8* %a) { -; CHECK-LABEL: ld1sb_d_inbound: -; CHECK: ld1sb { z0.d }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i8* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i8* - %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pg, i8* %base_scalar) - %res = sext %load to - ret %res -} - -define @ld1h_d_inbound( %pg, i16* %a) { -; CHECK-LABEL: ld1h_d_inbound: -; CHECK: ld1h { z0.d }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i16* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i16* - %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pg, i16* %base_scalar) - %res = zext %load to - ret %res -} - -define @ld1sh_d_inbound( %pg, i16* %a) { -; CHECK-LABEL: ld1sh_d_inbound: -; CHECK: ld1sh { z0.d }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i16* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i16* - %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pg, i16* %base_scalar) - %res = sext %load to - ret %res -} - -define @ld1h_f16_inbound( %pg, half* %a) { -; CHECK-LABEL: ld1h_f16_inbound: -; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast half* %a to * - %base = getelementptr , * %base_scalable, i64 1 - %base_scalar = bitcast * %base to half* - %load = call @llvm.aarch64.sve.ld1.nxv8f16( %pg, half* %base_scalar) - ret %load -} - ; ; LD1W ; @@ -361,28 +160,6 @@ ret %res } -define @ld1w_inbound( %pg, i32* %a) { -; CHECK-LABEL: ld1w_inbound: -; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i32* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i32* - %load = call @llvm.aarch64.sve.ld1.nxv4i32( %pg, i32* %base_scalar) - ret %load -} - -define @ld1w_f32_inbound( %pg, float* %a) { -; CHECK-LABEL: ld1w_f32_inbound: -; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast float* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to float* - %load = call @llvm.aarch64.sve.ld1.nxv4f32( %pg, float* %base_scalar) - ret %load -} - ; ; LD1D ; @@ -405,52 +182,6 @@ ret %res } -define @ld1d_inbound( %pg, i64* %a) { -; CHECK-LABEL: ld1d_inbound: -; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i64* %a to * - %base = getelementptr , * %base_scalable, i64 1 - %base_scalar = bitcast * %base to i64* - %load = call @llvm.aarch64.sve.ld1.nxv2i64( %pg, i64* %base_scalar) - ret %load -} - -define @ld1w_d_inbound( %pg, i32* %a) { -; CHECK-LABEL: ld1w_d_inbound: -; CHECK: ld1w { z0.d }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i32* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i32* - %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pg, i32* %base_scalar) - %res = zext %load to - ret %res -} - -define @ld1sw_d_inbound( %pg, i32* %a) { -; CHECK-LABEL: ld1sw_d_inbound: 
-; CHECK: ld1sw { z0.d }, p0/z, [x0, #7, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast i32* %a to * - %base = getelementptr , * %base_scalable, i64 7 - %base_scalar = bitcast * %base to i32* - %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pg, i32* %base_scalar) - %res = sext %load to - ret %res -} - -define @ld1d_f64_inbound( %pg, double* %a) { -; CHECK-LABEL: ld1d_f64_inbound: -; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl] -; CHECK-NEXT: ret - %base_scalable = bitcast double* %a to * - %base = getelementptr , * %base_scalable, i64 1 - %base_scalar = bitcast * %base to double* - %load = call @llvm.aarch64.sve.ld1.nxv2f64( %pg, double* %base_scalar) - ret %load -} - declare @llvm.aarch64.sve.ld1.nxv16i8(, i8*) declare @llvm.aarch64.sve.ld1.nxv8i8(, i8*) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll @@ -0,0 +1,229 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; ST1B +; + +define void @st1b_upper_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_upper_bound: +; CHECK: st1b { z0.b }, p0, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_inbound: +; CHECK: st1b { z0.b }, p0, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_lower_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_lower_bound: +; CHECK: st1b { z0.b }, p0, [x0, #-8, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -8 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_out_of_upper_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_out_of_upper_bound: +; CHECK: rdvl x[[OFFSET:[0-9]+]], #8 +; CHECK: st1b { z0.b }, p0, [x0, x[[OFFSET]]] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 8 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_out_of_lower_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_out_of_lower_bound: +; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9 +; CHECK: st1b { z0.b }, p0, [x0, x[[OFFSET]]] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -9 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_s_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_s_inbound: +; CHECK: st1b { z0.s }, p0, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i8( %trunc, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_h_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_h_inbound: +; 
CHECK: st1b { z0.h }, p0, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i8* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv8i8( %trunc, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_d_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_d_inbound: +; CHECK: st1b { z0.d }, p0, [x0, #-7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -7 + %base_scalar = bitcast * %base to i8* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i8( %trunc, %pg, i8* %base_scalar) + ret void +} + +; +; ST1H +; + +define void @st1h_inbound( %data, %pg, i16* %a) { +; CHECK-LABEL: st1h_inbound: +; CHECK: st1h { z0.h }, p0, [x0, #-1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 -1 + %base_scalar = bitcast * %base to i16* + call void @llvm.aarch64.sve.st1.nxv8i16( %data, %pg, i16* %base_scalar) + ret void +} + +define void @st1h_f16_inbound( %data, %pg, half* %a) { +; CHECK-LABEL: st1h_f16_inbound: +; CHECK: st1h { z0.h }, p0, [x0, #-5, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast half* %a to * + %base = getelementptr , * %base_scalable, i64 -5 + %base_scalar = bitcast * %base to half* + call void @llvm.aarch64.sve.st1.nxv8f16( %data, %pg, half* %base_scalar) + ret void +} + +define void @st1h_s_inbound( %data, %pg, i16* %a) { +; CHECK-LABEL: st1h_s_inbound: +; CHECK: st1h { z0.s }, p0, [x0, #2, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 2 + %base_scalar = bitcast * %base to i16* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i16( %trunc, %pg, i16* %base_scalar) + ret void +} + +define void @st1h_d_inbound( %data, %pg, i16* %a) { +; CHECK-LABEL: st1h_d_inbound: +; CHECK: st1h { z0.d }, p0, [x0, #-4, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 -4 + %base_scalar = bitcast * %base to i16* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i16( %trunc, %pg, i16* %base_scalar) + ret void +} + +; +; ST1W +; + +define void @st1w_inbound( %data, %pg, i32* %a) { +; CHECK-LABEL: st1w_inbound: +; CHECK: st1w { z0.s }, p0, [x0, #6, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 6 + %base_scalar = bitcast * %base to i32* + call void @llvm.aarch64.sve.st1.nxv4i32( %data, %pg, i32* %base_scalar) + ret void +} + +define void @st1w_f32_inbound( %data, %pg, float* %a) { +; CHECK-LABEL: st1w_f32_inbound: +; CHECK: st1w { z0.s }, p0, [x0, #-1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast float* %a to * + %base = getelementptr , * %base_scalable, i64 -1 + %base_scalar = bitcast * %base to float* + call void @llvm.aarch64.sve.st1.nxv4f32( %data, %pg, float* %base_scalar) + ret void +} + +define void @st1w_d_inbound( %data, %pg, i32* %a) { +; CHECK-LABEL: st1w_d_inbound: +; CHECK: st1w { z0.d }, p0, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i32* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i32( %trunc, %pg, i32* %base_scalar) + ret void +} + +; +; ST1D +; + +define void @st1d_inbound( %data, %pg, i64* %a) { +; CHECK-LABEL: st1d_inbound: +; CHECK: st1d { z0.d }, 
p0, [x0, #5, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i64* %a to * + %base = getelementptr , * %base_scalable, i64 5 + %base_scalar = bitcast * %base to i64* + call void @llvm.aarch64.sve.st1.nxv2i64( %data, %pg, i64* %base_scalar) + ret void +} + +define void @st1d_f64_inbound( %data, %pg, double* %a) { +; CHECK-LABEL: st1d_f64_inbound: +; CHECK: st1d { z0.d }, p0, [x0, #-8, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast double* %a to * + %base = getelementptr , * %base_scalable, i64 -8 + %base_scalar = bitcast * %base to double* + call void @llvm.aarch64.sve.st1.nxv2f64( %data, %pg, double* %base_scalar) + ret void +} + +declare void @llvm.aarch64.sve.st1.nxv16i8(, , i8*) + +declare void @llvm.aarch64.sve.st1.nxv8i8(, , i8*) +declare void @llvm.aarch64.sve.st1.nxv8i16(, , i16*) +declare void @llvm.aarch64.sve.st1.nxv8f16(, , half*) + +declare void @llvm.aarch64.sve.st1.nxv4i8(, , i8*) +declare void @llvm.aarch64.sve.st1.nxv4i16(, , i16*) +declare void @llvm.aarch64.sve.st1.nxv4i32(, , i32*) +declare void @llvm.aarch64.sve.st1.nxv4f32(, , float*) + +declare void @llvm.aarch64.sve.st1.nxv2i8(, , i8*) +declare void @llvm.aarch64.sve.st1.nxv2i16(, , i16*) +declare void @llvm.aarch64.sve.st1.nxv2i32(, , i32*) +declare void @llvm.aarch64.sve.st1.nxv2i64(, , i64*) +declare void @llvm.aarch64.sve.st1.nxv2f64(, , double*) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll @@ -0,0 +1,184 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; ST1B +; + +define void @st1b_i8( %data, %pred, i8* %a, i64 %index) { +; CHECK-LABEL: st1b_i8: +; CHECK: st1b { z0.b }, p0, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + call void @llvm.aarch64.sve.st1.nxv16i8( %data, + %pred, + i8* %base) + ret void +} + + + +define void @st1b_h( %data, %pred, i8* %a, i64 %index) { +; CHECK-LABEL: st1b_h: +; CHECK: st1b { z0.h }, p0, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv8i8( %trunc, + %pred, + i8* %base) + ret void +} + +define void @st1b_s( %data, %pred, i8* %a, i64 %index) { +; CHECK-LABEL: st1b_s: +; CHECK: st1b { z0.s }, p0, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i8( %trunc, + %pred, + i8* %base) + ret void +} + +define void @st1b_d( %data, %pred, i8* %a, i64 %index) { +; CHECK-LABEL: st1b_d: +; CHECK: st1b { z0.d }, p0, [x0, x1] +; CHECK-NEXT: ret + %base = getelementptr i8, i8* %a, i64 %index + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i8( %trunc, + %pred, + i8* %base) + ret void +} + +; +; ST1H +; + +define void @st1h_i16( %data, %pred, i16* %a, i64 %index) { +; CHECK-LABEL: st1h_i16: +; CHECK: st1h { z0.h }, p0, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr i16, i16* %a, i64 %index + call void @llvm.aarch64.sve.st1.nxv8i16( %data, + %pred, + i16* %base) + ret void +} + +define void @st1h_f16( %data, %pred, half* %a, i64 %index) { +; CHECK-LABEL: st1h_f16: +; CHECK: st1h { z0.h }, p0, [x0, x1, lsl #1] +; CHECK-NEXT: ret + %base = getelementptr half, half* %a, i64 %index + call void @llvm.aarch64.sve.st1.nxv8f16( %data, + %pred, + half* %base) + ret void +} + +define void @st1h_s( %data, %pred, i16* %addr) { +; CHECK-LABEL: st1h_s: 
+; CHECK: st1h { z0.s }, p0, [x0]
+; CHECK-NEXT: ret
+  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
+  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc,
+                                          <vscale x 4 x i1> %pred,
+                                          i16* %addr)
+  ret void
+}
+
+define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %a, i64 %index) {
+; CHECK-LABEL: st1h_d:
+; CHECK: st1h { z0.d }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr i16, i16* %a, i64 %index
+  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
+  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc,
+                                          <vscale x 2 x i1> %pred,
+                                          i16* %base)
+  ret void
+}
+
+;
+; ST1W
+;
+
+define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %a, i64 %index) {
+; CHECK-LABEL: st1w_i32:
+; CHECK: st1w { z0.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr i32, i32* %a, i64 %index
+  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
+                                          <vscale x 4 x i1> %pred,
+                                          i32* %base)
+  ret void
+}
+
+define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %a, i64 %index) {
+; CHECK-LABEL: st1w_f32:
+; CHECK: st1w { z0.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr float, float* %a, i64 %index
+  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
+                                          <vscale x 4 x i1> %pred,
+                                          float* %base)
+  ret void
+}
+
+define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %a, i64 %index) {
+; CHECK-LABEL: st1w_d:
+; CHECK: st1w { z0.d }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr i32, i32* %a, i64 %index
+  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
+  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc,
+                                          <vscale x 2 x i1> %pred,
+                                          i32* %base)
+  ret void
+}
+
+;
+; ST1D
+;
+
+define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %a, i64 %index) {
+; CHECK-LABEL: st1d_i64:
+; CHECK: st1d { z0.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %base = getelementptr i64, i64* %a, i64 %index
+  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
+                                          <vscale x 2 x i1> %pred,
+                                          i64* %base)
+  ret void
+}
+
+define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %a, i64 %index) {
+; CHECK-LABEL: st1d_f64:
+; CHECK: st1d { z0.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %base = getelementptr double, double* %a, i64 %index
+  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
+                                          <vscale x 2 x i1> %pred,
+                                          double* %base)
+  ret void
+}
+
+declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+
+declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
+declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+
+declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
+declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
+declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+
+declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*)
+declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
+declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
+declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
@@ -47,101 +47,6 @@
   ret void
 }
 
-define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_upper_bound:
-; CHECK: st1b { z0.b }, p0, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_inbound:
-; CHECK: st1b { z0.b }, p0, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_lower_bound:
-; CHECK: st1b { z0.b }, p0, [x0, #-8, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_out_of_upper_bound:
-; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
-; CHECK: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
-; CHECK: st1b { z0.b }, p0, [x[[BASE]]]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_out_of_lower_bound:
-; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
-; CHECK: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
-; CHECK: st1b { z0.b }, p0, [x[[BASE]]]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_s_inbound:
-; CHECK: st1b { z0.s }, p0, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
-  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_h_inbound:
-; CHECK: st1b { z0.h }, p0, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
-  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
-define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %a) {
-; CHECK-LABEL: st1b_d_inbound:
-; CHECK: st1b { z0.d }, p0, [x0, #-7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 -7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
-  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, i8* %base_scalar)
-  ret void
-}
-
 ;
 ; ST1H
 ;
@@ -188,52 +93,6 @@
   ret void
 }
 
-define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %a) {
-; CHECK-LABEL: st1h_inbound:
-; CHECK: st1h { z0.h }, p0, [x0, #-1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 -1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
-  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %base_scalar)
-  ret void
-}
-
-define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %a) {
-; CHECK-LABEL: st1h_f16_inbound:
-; CHECK: st1h { z0.h }, p0, [x0, #-5, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 -5
-  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
-  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %base_scalar)
-  ret void
-}
-
-define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %a) {
-; CHECK-LABEL: st1h_s_inbound:
-; CHECK: st1h { z0.s }, p0, [x0, #2, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 2
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
-  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, i16* %base_scalar)
-  ret void
-}
-
-define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %a) {
-; CHECK-LABEL: st1h_d_inbound:
-; CHECK: st1h { z0.d }, p0, [x0, #-4, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 -4
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
-  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, i16* %base_scalar)
-  ret void
-}
-
 ;
 ; ST1W
 ;
@@ -269,40 +128,6 @@
   ret void
 }
 
-define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %a) {
-; CHECK-LABEL: st1w_inbound:
-; CHECK: st1w { z0.s }, p0, [x0, #6, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 6
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
-  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base_scalar)
-  ret void
-}
-
-define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %a) {
-; CHECK-LABEL: st1w_f32_inbound:
-; CHECK: st1w { z0.s }, p0, [x0, #-1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 -1
-  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
-  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base_scalar)
-  ret void
-}
-
-define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %a) {
-; CHECK-LABEL: st1w_d_inbound:
-; CHECK: st1w { z0.d }, p0, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
-  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, i32* %base_scalar)
-  ret void
-}
-
 ;
 ; ST1D
 ;
@@ -327,28 +152,6 @@
   ret void
 }
 
-define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %a) {
-; CHECK-LABEL: st1d_inbound:
-; CHECK: st1d { z0.d }, p0, [x0, #5, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 5
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
-  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base_scalar)
-  ret void
-}
-
-define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %a) {
-; CHECK-LABEL: st1d_f64_inbound:
-; CHECK: st1d { z0.d }, p0, [x0, #-8, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
-  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base_scalar)
-  ret void
-}
-
 declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
 
 declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
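
Note: the multiclass and defm lines in the AArch64SVEInstrInfo.td hunks above are shown without their angle-bracket argument lists. As a sketch of the intended shape only, the updated ld1 multiclass plausibly takes the two instructions, the value types and the addressing-mode pattern as parameters, along the following lines; the parameter declarations and the concrete names in the example defm (LD1B_D, LD1B_D_IMM, AArch64ld1, am_sve_regreg_lsl0) are assumptions, not verbatim text from this patch:

  multiclass ld1<Instruction RegRegInst, Instruction RegImmInst, ValueType Ty,
                 SDPatternOperator Load, ValueType PredTy, ValueType MemVT,
                 ComplexPattern AddrCP> {
    // Body as in the patch above: a reg+reg pattern using AddrCP, a
    // scalar+immediate (mul vl) pattern using am_sve_indexed_s4, and a
    // plain-base fallback that emits the reg+imm instruction with offset 0.
  }

  // Assumed example instantiation: a 2-element contiguous load of bytes,
  // extended to 64-bit elements, with the index register unscaled (lsl #0).
  defm : ld1<LD1B_D, LD1B_D_IMM, nxv2i64, AArch64ld1, nxv2i1, nxv2i8, am_sve_regreg_lsl0>;

The AddedComplexity ordering is the key design choice: the reg+reg form beats the plain-base form (complexity 1 vs. 0), and the scalar+immediate (mul vl) form beats both (complexity 2), so an offset that fits the signed 4-bit immediate still selects the immediate encoding, as the in-bound/out-of-bound tests check.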
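The reg+reg patterns rely on the AddrCP ComplexPattern to match a 64-bit base register plus an index register pre-scaled by the element size, which is what produces the [x0, x1, lsl #N] forms the new tests expect. A sketch of what such definitions could look like; the selector name SelectSVERegRegAddrMode and the exact ComplexPattern signature are assumptions here:

  // One ComplexPattern per element size; the template argument is the left
  // shift applied to the index register (0 for bytes ... 3 for doublewords).
  def am_sve_regreg_lsl0 : ComplexPattern<i64, 2, "SelectSVERegRegAddrMode<0>", []>;
  def am_sve_regreg_lsl1 : ComplexPattern<i64, 2, "SelectSVERegRegAddrMode<1>", []>;
  def am_sve_regreg_lsl2 : ComplexPattern<i64, 2, "SelectSVERegRegAddrMode<2>", []>;
  def am_sve_regreg_lsl3 : ComplexPattern<i64, 2, "SelectSVERegRegAddrMode<3>", []>;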