Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -815,7 +815,7 @@
                 [IntrReadMem, IntrArgMemOnly, ImmArg>]>;
 
   class AdvSIMD_ManyVec_PredLoad_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyptr_ty],
+    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMPointerToElt<0>],
                 [IntrReadMem, IntrArgMemOnly]>;
 
   class AdvSIMD_1Vec_PredLoad_Intrinsic
@@ -834,20 +834,20 @@
   class AdvSIMD_2Vec_PredStore_Intrinsic
     : Intrinsic<[],
                 [llvm_anyvector_ty, LLVMMatchType<0>,
-                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerTo<0>],
+                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
                 [IntrArgMemOnly, NoCapture<3>]>;
 
   class AdvSIMD_3Vec_PredStore_Intrinsic
     : Intrinsic<[],
                 [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
-                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerTo<0>],
+                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
                 [IntrArgMemOnly, NoCapture<4>]>;
 
   class AdvSIMD_4Vec_PredStore_Intrinsic
     : Intrinsic<[],
                 [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
                  LLVMMatchType<0>,
-                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerTo<0>],
+                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
                 [IntrArgMemOnly, NoCapture<5>]>;
 
   class AdvSIMD_SVE_Index_Intrinsic
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
@@ -256,12 +256,11 @@
 ; LD2B
 ;
 
-define <vscale x 32 x i8> @ld2b_i8(<vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define <vscale x 32 x i8> @ld2b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
 ; CHECK-LABEL: ld2b_i8:
 ; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0nxv16i8(<vscale x 16 x i1> %pred,
-                                                                                 <vscale x 16 x i8>* %addr)
+  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
   ret <vscale x 32 x i8> %res
 }
 
@@ -269,21 +268,19 @@
 ; LD2H
 ;
 
-define <vscale x 16 x i16> @ld2h_i16(<vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define <vscale x 16 x i16> @ld2h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
 ; CHECK-LABEL: ld2h_i16:
 ; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0nxv8i16(<vscale x 8 x i1> %pred,
-                                                                                  <vscale x 8 x i16>* %addr)
+  %res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %pred, i16* %addr)
   ret <vscale x 16 x i16> %res
 }
 
-define <vscale x 16 x half> @ld2h_f16(<vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define <vscale x 16 x half> @ld2h_f16(<vscale x 8 x i1> %pred, half* %addr) {
 ; CHECK-LABEL: ld2h_f16:
 ; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0nxv8f16(<vscale x 8 x i1> %pred,
-                                                                                   <vscale x 8 x half>* %addr)
+  %res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %pred, half* %addr)
   ret <vscale x 16 x half> %res
 }
 
@@ -291,21 +288,19 @@
 ; LD2W
 ;
 
-define <vscale x 8 x i32> @ld2w_i32(<vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define <vscale x 8 x i32> @ld2w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
 ; CHECK-LABEL: ld2w_i32:
 ; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0nxv4i32(<vscale x 4 x i1> %pred,
-                                                                                <vscale x 4 x i32>* %addr)
+  %res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %pred, i32* %addr)
   ret <vscale x 8 x i32> %res
 }
 
-define <vscale x 8 x float> @ld2w_f32(<vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define <vscale x 8 x float> @ld2w_f32(<vscale x 4 x i1> %pred, float* %addr) {
 ; CHECK-LABEL: ld2w_f32:
 ; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0nxv4f32(<vscale x 4 x i1> %pred,
-                                                                                  <vscale x 4 x float>* %addr)
+  %res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %pred, float* %addr)
   ret <vscale x 8 x float> %res
 }
 
@@ -313,21 +308,19 @@
 ; LD2D
 ;
 
-define <vscale x 4 x i64> @ld2d_i64(<vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define <vscale x 4 x i64> @ld2d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
 ; CHECK-LABEL: ld2d_i64:
 ; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0nxv2i64(<vscale x 2 x i1> %pred,
-                                                                                <vscale x 2 x i64>* %addr)
+  %res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %pred, i64* %addr)
   ret <vscale x 4 x i64> %res
 }
 
-define <vscale x 4 x double> @ld2d_f64(<vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define <vscale x 4 x double> @ld2d_f64(<vscale x 2 x i1> %pred, double* %addr) {
 ; CHECK-LABEL: ld2d_f64:
 ; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0nxv2f64(<vscale x 2 x i1> %pred,
-                                                                                   <vscale x 2 x double>* %addr)
+  %res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %pred, double* %addr)
   ret <vscale x 4 x double> %res
 }
 
@@ -335,12 +328,11 @@
 ; LD3B
 ;
 
-define <vscale x 48 x i8> @ld3b_i8(<vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define <vscale x 48 x i8> @ld3b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
 ; CHECK-LABEL: ld3b_i8:
 ; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0nxv16i8(<vscale x 16 x i1> %pred,
-                                                                                 <vscale x 16 x i8>* %addr)
+  %res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
   ret <vscale x 48 x i8> %res
 }
 
@@ -348,21 +340,19 @@
 ; LD3H
 ;
 
-define <vscale x 24 x i16> @ld3h_i16(<vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define <vscale x 24 x i16> @ld3h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
 ; CHECK-LABEL: ld3h_i16:
 ; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0nxv8i16(<vscale x 8 x i1> %pred,
-                                                                                  <vscale x 8 x i16>* %addr)
+  %res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %pred, i16* %addr)
   ret <vscale x 24 x i16> %res
 }
 
-define <vscale x 24 x half> @ld3h_f16(<vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define <vscale x 24 x half> @ld3h_f16(<vscale x 8 x i1> %pred, half* %addr) {
 ; CHECK-LABEL: ld3h_f16:
 ; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0nxv8f16(<vscale x 8 x i1> %pred,
-                                                                                   <vscale x 8 x half>* %addr)
+  %res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %pred, half* %addr)
   ret <vscale x 24 x half> %res
 }
 
@@ -370,21 +360,19 @@
 ; LD3W
 ;
 
-define <vscale x 12 x i32> @ld3w_i32(<vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define <vscale x 12 x i32> @ld3w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
 ; CHECK-LABEL: ld3w_i32:
 ; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0nxv4i32(<vscale x 4 x i1> %pred,
-                                                                                  <vscale x 4 x i32>* %addr)
+  %res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %pred, i32* %addr)
   ret <vscale x 12 x i32> %res
 }
 
-define <vscale x 12 x float> @ld3w_f32(<vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define <vscale x 12 x float> @ld3w_f32(<vscale x 4 x i1> %pred, float* %addr) {
 ; CHECK-LABEL: ld3w_f32:
 ; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0nxv4f32(<vscale x 4 x i1> %pred,
-                                                                                    <vscale x 4 x float>* %addr)
+  %res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %pred, float* %addr)
   ret <vscale x 12 x float> %res
 }
 
@@ -392,21 +380,19 @@
 ; LD3D
 ;
 
-define <vscale x 6 x i64> @ld3d_i64(<vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define <vscale x 6 x i64> @ld3d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
 ; CHECK-LABEL: ld3d_i64:
 ; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0nxv2i64(<vscale x 2 x i1> %pred,
-                                                                                <vscale x 2 x i64>* %addr)
+  %res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %pred, i64* %addr)
   ret <vscale x 6 x i64> %res
 }
 
-define <vscale x 6 x double> @ld3d_f64(<vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define <vscale x 6 x double> @ld3d_f64(<vscale x 2 x i1> %pred, double* %addr) {
 ; CHECK-LABEL: ld3d_f64:
 ; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0nxv2f64(<vscale x 2 x i1> %pred,
-                                                                                   <vscale x 2 x double>* %addr)
+  %res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %pred, double* %addr)
   ret <vscale x 6 x double> %res
 }
 
@@ -414,12 +400,11 @@
 ; LD4B
 ;
 
-define <vscale x 64 x i8> @ld4b_i8(<vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define <vscale x 64 x i8> @ld4b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
 ; CHECK-LABEL: ld4b_i8:
 ; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0nxv16i8(<vscale x 16 x i1> %pred,
-                                                                                 <vscale x 16 x i8>* %addr)
+  %res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
   ret <vscale x 64 x i8> %res
 }
 
@@ -427,21 +412,19 @@
 ; LD4H
 ;
 
-define <vscale x 32 x i16> @ld4h_i16(<vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define <vscale x 32 x i16> @ld4h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
 ; CHECK-LABEL: ld4h_i16:
 ; CHECK: ld4h { z0.h, z1.h, z2.h,
z3.h }, p0/z, [x0] ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0nxv8i16( %pred, - * %addr) + %res = call @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16( %pred, i16* %addr) ret %res } -define @ld4h_f16( %pred, * %addr) { +define @ld4h_f16( %pred, half* %addr) { ; CHECK-LABEL: ld4h_f16: ; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0] ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0nxv8f16( %pred, - * %addr) + %res = call @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16( %pred, half* %addr) ret %res } @@ -449,21 +432,19 @@ ; LD4W ; -define @ld4w_i32( %pred, * %addr) { +define @ld4w_i32( %pred, i32* %addr) { ; CHECK-LABEL: ld4w_i32: ; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0] ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0nxv4i32( %pred, - * %addr) + %res = call @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32( %pred, i32* %addr) ret %res } -define @ld4w_f32( %pred, * %addr) { +define @ld4w_f32( %pred, float* %addr) { ; CHECK-LABEL: ld4w_f32: ; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0] ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0nxv4f32( %pred, - * %addr) + %res = call @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32( %pred, float* %addr) ret %res } @@ -471,21 +452,19 @@ ; LD4D ; -define @ld4d_i64( %pred, * %addr) { +define @ld4d_i64( %pred, i64* %addr) { ; CHECK-LABEL: ld4d_i64: ; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0] ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0nxv2i64( %pred, - * %addr) + %res = call @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64( %pred, i64* %addr) ret %res } -define @ld4d_f64( %pred, * %addr) { +define @ld4d_f64( %pred, double* %addr) { ; CHECK-LABEL: ld4d_f64: ; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0] ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0nxv2f64( %pred, - * %addr) + %res = call @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64( %pred, double* %addr) ret %res } @@ -506,26 +485,26 @@ declare @llvm.aarch64.sve.ldnt1.nxv4f32(, float*) declare @llvm.aarch64.sve.ldnt1.nxv2f64(, double*) -declare @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0nxv16i8(, *) -declare @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0nxv8i16(, *) -declare @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0nxv4i32(, *) -declare @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0nxv2i64(, *) -declare @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0nxv8f16(, *) -declare @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0nxv4f32(, *) -declare @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0nxv2f64(, *) - -declare @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0nxv16i8(, *) -declare @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0nxv8i16(, *) -declare @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0nxv4i32(, *) -declare @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0nxv2i64(, *) -declare @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0nxv8f16(, *) -declare @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0nxv4f32(, *) -declare @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0nxv2f64(, *) - -declare @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0nxv16i8(, *) -declare @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0nxv8i16(, *) -declare @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0nxv4i32(, *) -declare @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0nxv2i64(, *) -declare @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0nxv8f16(, *) -declare @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0nxv4f32(, *) -declare @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0nxv2f64(, *) +declare @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(, i8*) +declare @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(, i16*) +declare 
@llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(, i32*) +declare @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(, i64*) +declare @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(, half*) +declare @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(, float*) +declare @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(, double*) + +declare @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(, i8*) +declare @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(, i16*) +declare @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(, i32*) +declare @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(, i64*) +declare @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(, half*) +declare @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(, float*) +declare @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(, double*) + +declare @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(, i8*) +declare @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(, i16*) +declare @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(, i32*) +declare @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(, i64*) +declare @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(, half*) +declare @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(, float*) +declare @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(, double*) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll @@ -14,11 +14,11 @@ ; CHECK-LABEL: st2b_i8_valid_imm: ; CHECK: st2b { z0.b, z1.b }, p0, [x0, #2, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 2 + %base = getelementptr , * %addr, i64 2, i64 0 call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -27,11 +27,11 @@ ; CHECK: rdvl x[[N:[0-9]+]], #3 ; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 3 + %base = getelementptr , * %addr, i64 3, i64 0 call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -40,11 +40,11 @@ ; CHECK: rdvl x[[N:[0-9]+]], #-18 ; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -18 + %base = getelementptr , * %addr, i64 -18, i64 0 call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -53,11 +53,11 @@ ; CHECK: rdvl x[[N:[0-9]+]], #16 ; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 16 + %base = getelementptr , * %addr, i64 16, i64 0 call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -65,11 +65,11 @@ ; CHECK-LABEL: st2b_i8_valid_imm_lower_bound: ; CHECK: st2b { z0.b, z1.b }, p0, [x0, #-16, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -16 + %base = getelementptr , * %addr, i64 -16, i64 0 call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -77,11 +77,11 @@ ; CHECK-LABEL: st2b_i8_valid_imm_upper_bound: ; CHECK: st2b { z0.b, z1.b }, p0, [x0, #14, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 14 + %base = getelementptr , * %addr, i64 14, i64 0 call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -93,11 +93,11 @@ ; CHECK-LABEL: st2h_i16: ; CHECK: st2h { z0.h, z1.h }, p0, [x0, #2, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 2 + %base = getelementptr , * %addr, i64 2, i64 0 call void @llvm.aarch64.sve.st2.nxv8i16( 
%v0, %v1, %pred, - * %base) + i16* %base) ret void } @@ -105,11 +105,11 @@ ; CHECK-LABEL: st2h_f16: ; CHECK: st2h { z0.h, z1.h }, p0, [x0, #2, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 2 + %base = getelementptr , * %addr, i64 2, i64 0 call void @llvm.aarch64.sve.st2.nxv8f16( %v0, %v1, %pred, - * %base) + half* %base) ret void } @@ -121,11 +121,11 @@ ; CHECK-LABEL: st2w_i32: ; CHECK: st2w { z0.s, z1.s }, p0, [x0, #4, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 4 + %base = getelementptr , * %addr, i64 4, i64 0 call void @llvm.aarch64.sve.st2.nxv4i32( %v0, %v1, %pred, - * %base) + i32* %base) ret void } @@ -133,11 +133,11 @@ ; CHECK-LABEL: st2w_f32: ; CHECK: st2w { z0.s, z1.s }, p0, [x0, #6, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 6 + %base = getelementptr , * %addr, i64 6, i64 0 call void @llvm.aarch64.sve.st2.nxv4f32( %v0, %v1, %pred, - * %base) + float* %base) ret void } @@ -149,11 +149,11 @@ ; CHECK-LABEL: st2d_i64: ; CHECK: st2d { z0.d, z1.d }, p0, [x0, #8, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 8 + %base = getelementptr , * %addr, i64 8, i64 0 call void @llvm.aarch64.sve.st2.nxv2i64( %v0, %v1, %pred, - * %base) + i64* %base) ret void } @@ -161,11 +161,11 @@ ; CHECK-LABEL: st2d_f64: ; CHECK: st2d { z0.d, z1.d }, p0, [x0, #10, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 10 + %base = getelementptr , * %addr, i64 10, i64 0 call void @llvm.aarch64.sve.st2.nxv2f64( %v0, %v1, %pred, - * %base) + double* %base) ret void } @@ -177,12 +177,12 @@ ; CHECK-LABEL: st3b_i8_valid_imm: ; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #3, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 3 + %base = getelementptr , * %addr, i64 3, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -191,12 +191,12 @@ ; CHECK: rdvl x[[N:[0-9]+]], #4 ; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 4 + %base = getelementptr , * %addr, i64 4, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -205,12 +205,12 @@ ; CHECK: rdvl x[[N:[0-9]+]], #5 ; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 5 + %base = getelementptr , * %addr, i64 5, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -219,12 +219,12 @@ ; CHECK: rdvl x[[N:[0-9]+]], #-27 ; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -27 + %base = getelementptr , * %addr, i64 -27, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -233,12 +233,12 @@ ; CHECK: rdvl x[[N:[0-9]+]], #24 ; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 24 + %base = getelementptr , * %addr, i64 24, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -246,12 +246,12 @@ ; CHECK-LABEL: st3b_i8_valid_imm_lower_bound: ; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #-24, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -24 + %base = getelementptr , * %addr, i64 -24, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -259,12 +259,12 @@ ; CHECK-LABEL: 
st3b_i8_valid_imm_upper_bound: ; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #21, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 21 + %base = getelementptr , * %addr, i64 21, i64 0 call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -276,12 +276,12 @@ ; CHECK-LABEL: st3h_i16: ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, #6, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 6 + %base = getelementptr , * %addr, i64 6, i64 0 call void @llvm.aarch64.sve.st3.nxv8i16( %v0, %v1, %v2, %pred, - * %base) + i16* %base) ret void } @@ -289,12 +289,12 @@ ; CHECK-LABEL: st3h_f16: ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, #9, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 9 + %base = getelementptr , * %addr, i64 9, i64 0 call void @llvm.aarch64.sve.st3.nxv8f16( %v0, %v1, %v2, %pred, - * %base) + half* %base) ret void } @@ -306,12 +306,12 @@ ; CHECK-LABEL: st3w_i32: ; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, #12, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 12 + %base = getelementptr , * %addr, i64 12, i64 0 call void @llvm.aarch64.sve.st3.nxv4i32( %v0, %v1, %v2, %pred, - * %base) + i32* %base) ret void } @@ -319,12 +319,12 @@ ; CHECK-LABEL: st3w_f32: ; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, #15, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 15 + %base = getelementptr , * %addr, i64 15, i64 0 call void @llvm.aarch64.sve.st3.nxv4f32( %v0, %v1, %v2, %pred, - * %base) + float* %base) ret void } @@ -336,12 +336,12 @@ ; CHECK-LABEL: st3d_i64: ; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, #18, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 18 + %base = getelementptr , * %addr, i64 18, i64 0 call void @llvm.aarch64.sve.st3.nxv2i64( %v0, %v1, %v2, %pred, - * %base) + i64* %base) ret void } @@ -349,12 +349,12 @@ ; CHECK-LABEL: st3d_f64: ; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, #-3, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -3 + %base = getelementptr , * %addr, i64 -3, i64 0 call void @llvm.aarch64.sve.st3.nxv2f64( %v0, %v1, %v2, %pred, - * %base) + double* %base) ret void } @@ -366,13 +366,13 @@ ; CHECK-LABEL: st4b_i8_valid_imm: ; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #4, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 4 + %base = getelementptr , * %addr, i64 4, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -381,13 +381,13 @@ ; CHECK: rdvl x[[N:[0-9]+]], #5 ; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 5 + %base = getelementptr , * %addr, i64 5, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -396,13 +396,13 @@ ; CHECK: rdvl x[[N:[0-9]+]], #6 ; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 6 + %base = getelementptr , * %addr, i64 6, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -411,13 +411,13 @@ ; CHECK: rdvl x[[N:[0-9]+]], #7 ; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 7 + %base = getelementptr , * %addr, i64 7, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -433,13 +433,13 @@ ; CHECK-DAG: mul x[[OFFSET:[0-9]+]], x[[P]], 
x[[M]] ; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[OFFSET]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -36 + %base = getelementptr , * %addr, i64 -36, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -455,13 +455,13 @@ ; CHECK-DAG: mul x[[OFFSET:[0-9]+]], x[[P]], x[[M]] ; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[OFFSET]]] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 32 + %base = getelementptr , * %addr, i64 32, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -469,13 +469,13 @@ ; CHECK-LABEL: st4b_i8_valid_imm_lower_bound: ; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #-32, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 -32 + %base = getelementptr , * %addr, i64 -32, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -483,13 +483,13 @@ ; CHECK-LABEL: st4b_i8_valid_imm_upper_bound: ; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #28, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 28 + %base = getelementptr , * %addr, i64 28, i64 0 call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -501,13 +501,13 @@ ; CHECK-LABEL: st4h_i16: ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, #8, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 8 + %base = getelementptr , * %addr, i64 8, i64 0 call void @llvm.aarch64.sve.st4.nxv8i16( %v0, %v1, %v2, %v3, %pred, - * %base) + i16* %base) ret void } @@ -515,13 +515,13 @@ ; CHECK-LABEL: st4h_f16: ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, #12, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 12 + %base = getelementptr , * %addr, i64 12, i64 0 call void @llvm.aarch64.sve.st4.nxv8f16( %v0, %v1, %v2, %v3, %pred, - * %base) + half* %base) ret void } @@ -533,13 +533,13 @@ ; CHECK-LABEL: st4w_i32: ; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, #16, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 16 + %base = getelementptr , * %addr, i64 16, i64 0 call void @llvm.aarch64.sve.st4.nxv4i32( %v0, %v1, %v2, %v3, %pred, - * %base) + i32* %base) ret void } @@ -547,13 +547,13 @@ ; CHECK-LABEL: st4w_f32: ; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, #20, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 20 + %base = getelementptr , * %addr, i64 20, i64 0 call void @llvm.aarch64.sve.st4.nxv4f32( %v0, %v1, %v2, %v3, %pred, - * %base) + float* %base) ret void } @@ -565,13 +565,13 @@ ; CHECK-LABEL: st4d_i64: ; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, #24, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 24 + %base = getelementptr , * %addr, i64 24, i64 0 call void @llvm.aarch64.sve.st4.nxv2i64( %v0, %v1, %v2, %v3, %pred, - * %base) + i64* %base) ret void } @@ -579,36 +579,36 @@ ; CHECK-LABEL: st4d_f64: ; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, #28, mul vl] ; CHECK-NEXT: ret - %base = getelementptr , * %addr, i64 28 + %base = getelementptr , * %addr, i64 28, i64 0 call void @llvm.aarch64.sve.st4.nxv2f64( %v0, %v1, %v2, %v3, %pred, - * %base) - ret void -} - -declare void @llvm.aarch64.sve.st2.nxv16i8(, , , *) -declare void @llvm.aarch64.sve.st2.nxv8i16(, , , *) -declare void @llvm.aarch64.sve.st2.nxv4i32(, , , *) -declare void @llvm.aarch64.sve.st2.nxv2i64(, , , *) -declare void @llvm.aarch64.sve.st2.nxv8f16(, , , *) -declare void 
@llvm.aarch64.sve.st2.nxv4f32(, , , *) -declare void @llvm.aarch64.sve.st2.nxv2f64(, , , *) - -declare void @llvm.aarch64.sve.st3.nxv16i8(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv8i16(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv4i32(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv2i64(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv8f16(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv4f32(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv2f64(, , , , *) - -declare void @llvm.aarch64.sve.st4.nxv16i8(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv8i16(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv4i32(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv2i64(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv8f16(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv4f32(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv2f64(, , , , , *) + double* %base) + ret void +} + +declare void @llvm.aarch64.sve.st2.nxv16i8(, , , i8*) +declare void @llvm.aarch64.sve.st2.nxv8i16(, , , i16*) +declare void @llvm.aarch64.sve.st2.nxv4i32(, , , i32*) +declare void @llvm.aarch64.sve.st2.nxv2i64(, , , i64*) +declare void @llvm.aarch64.sve.st2.nxv8f16(, , , half*) +declare void @llvm.aarch64.sve.st2.nxv4f32(, , , float*) +declare void @llvm.aarch64.sve.st2.nxv2f64(, , , double*) + +declare void @llvm.aarch64.sve.st3.nxv16i8(, , , , i8*) +declare void @llvm.aarch64.sve.st3.nxv8i16(, , , , i16*) +declare void @llvm.aarch64.sve.st3.nxv4i32(, , , , i32*) +declare void @llvm.aarch64.sve.st3.nxv2i64(, , , , i64*) +declare void @llvm.aarch64.sve.st3.nxv8f16(, , , , half*) +declare void @llvm.aarch64.sve.st3.nxv4f32(, , , , float*) +declare void @llvm.aarch64.sve.st3.nxv2f64(, , , , double*) + +declare void @llvm.aarch64.sve.st4.nxv16i8(, , , , , i8*) +declare void @llvm.aarch64.sve.st4.nxv8i16(, , , , , i16*) +declare void @llvm.aarch64.sve.st4.nxv4i32(, , , , , i32*) +declare void @llvm.aarch64.sve.st4.nxv2i64(, , , , , i64*) +declare void @llvm.aarch64.sve.st4.nxv8f16(, , , , , half*) +declare void @llvm.aarch64.sve.st4.nxv4f32(, , , , , float*) +declare void @llvm.aarch64.sve.st4.nxv2f64(, , , , , double*) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll @@ -9,11 +9,10 @@ ; CHECK: st2b { z0.b, z1.b }, p0, [x0, x1] ; CHECK-NEXT: ret %1 = getelementptr i8, i8* %addr, i64 %offset - %base = bitcast i8* %1 to * call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %base) + i8* %base) ret void } @@ -26,11 +25,10 @@ ; CHECK: st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1] ; CHECK-NEXT: ret %1 = getelementptr i16, i16* %addr, i64 %offset - %base = bitcast i16* %1 to * call void @llvm.aarch64.sve.st2.nxv8i16( %v0, %v1, %pred, - * %base) + i16* %base) ret void } @@ -39,11 +37,10 @@ ; CHECK: st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1] ; CHECK-NEXT: ret %1 = getelementptr half, half* %addr, i64 %offset - %base = bitcast half* %1 to * call void @llvm.aarch64.sve.st2.nxv8f16( %v0, %v1, %pred, - * %base) + half* %base) ret void } @@ -56,11 +53,10 @@ ; CHECK: st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2] ; CHECK-NEXT: ret %1 = getelementptr i32, i32* %addr, i64 %offset - %base = bitcast i32* %1 to * call void @llvm.aarch64.sve.st2.nxv4i32( %v0, %v1, %pred, - * %base) + i32* %base) ret void } @@ -69,11 +65,10 @@ ; CHECK: st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2] ; CHECK-NEXT: 
ret %1 = getelementptr float, float* %addr, i64 %offset - %base = bitcast float* %1 to * call void @llvm.aarch64.sve.st2.nxv4f32( %v0, %v1, %pred, - * %base) + float* %base) ret void } @@ -86,11 +81,10 @@ ; CHECK: st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3] ; CHECK-NEXT: ret %1 = getelementptr i64, i64* %addr, i64 %offset - %base = bitcast i64* %1 to * call void @llvm.aarch64.sve.st2.nxv2i64( %v0, %v1, %pred, - * %base) + i64* %base) ret void } @@ -99,11 +93,10 @@ ; CHECK: st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3] ; CHECK-NEXT: ret %1 = getelementptr double, double* %addr, i64 %offset - %base = bitcast double* %1 to * call void @llvm.aarch64.sve.st2.nxv2f64( %v0, %v1, %pred, - * %base) + double* %base) ret void } @@ -116,12 +109,11 @@ ; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, x1] ; CHECK-NEXT: ret %1 = getelementptr i8, i8* %addr, i64 %offset - %base = bitcast i8* %1 to * call void @llvm.aarch64.sve.st3.nxv16i8( %v0, %v1, %v2, %pred, - * %base) + i8* %base) ret void } @@ -134,12 +126,11 @@ ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, x1, lsl #1] ; CHECK-NEXT: ret %1 = getelementptr i16, i16* %addr, i64 %offset - %base = bitcast i16* %1 to * call void @llvm.aarch64.sve.st3.nxv8i16( %v0, %v1, %v2, %pred, - * %base) + i16* %base) ret void } @@ -148,12 +139,11 @@ ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, x1, lsl #1] ; CHECK-NEXT: ret %1 = getelementptr half, half* %addr, i64 %offset - %base = bitcast half* %1 to * call void @llvm.aarch64.sve.st3.nxv8f16( %v0, %v1, %v2, %pred, - * %base) + half* %base) ret void } @@ -166,12 +156,11 @@ ; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, x1, lsl #2] ; CHECK-NEXT: ret %1 = getelementptr i32, i32* %addr, i64 %offset - %base = bitcast i32* %1 to * call void @llvm.aarch64.sve.st3.nxv4i32( %v0, %v1, %v2, %pred, - * %base) + i32* %base) ret void } @@ -180,12 +169,11 @@ ; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, x1, lsl #2] ; CHECK-NEXT: ret %1 = getelementptr float, float* %addr, i64 %offset - %base = bitcast float* %1 to * call void @llvm.aarch64.sve.st3.nxv4f32( %v0, %v1, %v2, %pred, - * %base) + float* %base) ret void } @@ -198,12 +186,11 @@ ; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, x1, lsl #3] ; CHECK-NEXT: ret %1 = getelementptr i64, i64* %addr, i64 %offset - %base = bitcast i64* %1 to * call void @llvm.aarch64.sve.st3.nxv2i64( %v0, %v1, %v2, %pred, - * %base) + i64* %base) ret void } @@ -212,12 +199,11 @@ ; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, x1, lsl #3] ; CHECK-NEXT: ret %1 = getelementptr double, double* %addr, i64 %offset - %base = bitcast double* %1 to * call void @llvm.aarch64.sve.st3.nxv2f64( %v0, %v1, %v2, %pred, - * %base) + double* %base) ret void } @@ -230,13 +216,12 @@ ; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x1] ; CHECK-NEXT: ret %1 = getelementptr i8, i8* %addr, i64 %offset - %base = bitcast i8* %1 to * call void @llvm.aarch64.sve.st4.nxv16i8( %v0, %v1, %v2, %v3, %pred, - * %base) + i8* %base) ret void } @@ -249,13 +234,12 @@ ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, x1, lsl #1] ; CHECK-NEXT: ret %1 = getelementptr i16, i16* %addr, i64 %offset - %base = bitcast i16* %1 to * call void @llvm.aarch64.sve.st4.nxv8i16( %v0, %v1, %v2, %v3, %pred, - * %base) + i16* %base) ret void } @@ -264,13 +248,12 @@ ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, x1, lsl #1] ; CHECK-NEXT: ret %1 = getelementptr half, half* %addr, i64 %offset - %base = bitcast half* %1 to * call void @llvm.aarch64.sve.st4.nxv8f16( %v0, %v1, %v2, %v3, %pred, - * %base) + half* %base) ret void } @@ -283,13 +266,12 @@ ; CHECK: st4w { z0.s, 
z1.s, z2.s, z3.s }, p0, [x0, x1, lsl #2] ; CHECK-NEXT: ret %1 = getelementptr i32, i32* %addr, i64 %offset - %base = bitcast i32* %1 to * call void @llvm.aarch64.sve.st4.nxv4i32( %v0, %v1, %v2, %v3, %pred, - * %base) + i32* %base) ret void } @@ -298,13 +280,12 @@ ; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, x1, lsl #2] ; CHECK-NEXT: ret %1 = getelementptr float, float* %addr, i64 %offset - %base = bitcast float* %1 to * call void @llvm.aarch64.sve.st4.nxv4f32( %v0, %v1, %v2, %v3, %pred, - * %base) + float* %base) ret void } @@ -317,13 +298,12 @@ ; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, x1, lsl #3] ; CHECK-NEXT: ret %1 = getelementptr i64, i64* %addr, i64 %offset - %base = bitcast i64* %1 to * call void @llvm.aarch64.sve.st4.nxv2i64( %v0, %v1, %v2, %v3, %pred, - * %base) + i64* %base) ret void } @@ -332,36 +312,35 @@ ; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, x1, lsl #3] ; CHECK-NEXT: ret %1 = getelementptr double, double* %addr, i64 %offset - %base = bitcast double* %1 to * call void @llvm.aarch64.sve.st4.nxv2f64( %v0, %v1, %v2, %v3, %pred, - * %base) + double* %base) ret void } -declare void @llvm.aarch64.sve.st2.nxv16i8(, , , *) -declare void @llvm.aarch64.sve.st2.nxv8i16(, , , *) -declare void @llvm.aarch64.sve.st2.nxv4i32(, , , *) -declare void @llvm.aarch64.sve.st2.nxv2i64(, , , *) -declare void @llvm.aarch64.sve.st2.nxv8f16(, , , *) -declare void @llvm.aarch64.sve.st2.nxv4f32(, , , *) -declare void @llvm.aarch64.sve.st2.nxv2f64(, , , *) - -declare void @llvm.aarch64.sve.st3.nxv16i8(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv8i16(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv4i32(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv2i64(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv8f16(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv4f32(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv2f64(, , , , *) - -declare void @llvm.aarch64.sve.st4.nxv16i8(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv8i16(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv4i32(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv2i64(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv8f16(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv4f32(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv2f64(, , , , , *) +declare void @llvm.aarch64.sve.st2.nxv16i8(, , , i8*) +declare void @llvm.aarch64.sve.st2.nxv8i16(, , , i16*) +declare void @llvm.aarch64.sve.st2.nxv4i32(, , , i32*) +declare void @llvm.aarch64.sve.st2.nxv2i64(, , , i64*) +declare void @llvm.aarch64.sve.st2.nxv8f16(, , , half*) +declare void @llvm.aarch64.sve.st2.nxv4f32(, , , float*) +declare void @llvm.aarch64.sve.st2.nxv2f64(, , , double*) + +declare void @llvm.aarch64.sve.st3.nxv16i8(, , , , i8*) +declare void @llvm.aarch64.sve.st3.nxv8i16(, , , , i16*) +declare void @llvm.aarch64.sve.st3.nxv4i32(, , , , i32*) +declare void @llvm.aarch64.sve.st3.nxv2i64(, , , , i64*) +declare void @llvm.aarch64.sve.st3.nxv8f16(, , , , half*) +declare void @llvm.aarch64.sve.st3.nxv4f32(, , , , float*) +declare void @llvm.aarch64.sve.st3.nxv2f64(, , , , double*) + +declare void @llvm.aarch64.sve.st4.nxv16i8(, , , , , i8*) +declare void @llvm.aarch64.sve.st4.nxv8i16(, , , , , i16*) +declare void @llvm.aarch64.sve.st4.nxv4i32(, , , , , i32*) +declare void @llvm.aarch64.sve.st4.nxv2i64(, , , , , i64*) +declare void @llvm.aarch64.sve.st4.nxv8f16(, , , , , half*) +declare void @llvm.aarch64.sve.st4.nxv4f32(, , , , , float*) +declare void @llvm.aarch64.sve.st4.nxv2f64(, , , , , double*) Index: 
llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll @@ -4,14 +4,14 @@ ; ST2B ; -define void @st2b_i8( %v0, %v1, %pred, * %addr) { +define void @st2b_i8( %v0, %v1, %pred, i8* %addr) { ; CHECK-LABEL: st2b_i8: ; CHECK: st2b { z0.b, z1.b }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv16i8( %v0, %v1, %pred, - * %addr) + i8* %addr) ret void } @@ -19,25 +19,25 @@ ; ST2H ; -define void @st2h_i16( %v0, %v1, %pred, * %addr) { +define void @st2h_i16( %v0, %v1, %pred, i16* %addr) { ; CHECK-LABEL: st2h_i16: ; CHECK: st2h { z0.h, z1.h }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv8i16( %v0, %v1, %pred, - * %addr) + i16* %addr) ret void } -define void @st2h_f16( %v0, %v1, %pred, * %addr) { +define void @st2h_f16( %v0, %v1, %pred, half* %addr) { ; CHECK-LABEL: st2h_f16: ; CHECK: st2h { z0.h, z1.h }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv8f16( %v0, %v1, %pred, - * %addr) + half* %addr) ret void } @@ -45,25 +45,25 @@ ; ST2W ; -define void @st2w_i32( %v0, %v1, %pred, * %addr) { +define void @st2w_i32( %v0, %v1, %pred, i32* %addr) { ; CHECK-LABEL: st2w_i32: ; CHECK: st2w { z0.s, z1.s }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv4i32( %v0, %v1, %pred, - * %addr) + i32* %addr) ret void } -define void @st2w_f32( %v0, %v1, %pred, * %addr) { +define void @st2w_f32( %v0, %v1, %pred, float* %addr) { ; CHECK-LABEL: st2w_f32: ; CHECK: st2w { z0.s, z1.s }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv4f32( %v0, %v1, %pred, - * %addr) + float* %addr) ret void } @@ -71,25 +71,25 @@ ; ST2D ; -define void @st2d_i64( %v0, %v1, %pred, * %addr) { +define void @st2d_i64( %v0, %v1, %pred, i64* %addr) { ; CHECK-LABEL: st2d_i64: ; CHECK: st2d { z0.d, z1.d }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv2i64( %v0, %v1, %pred, - * %addr) + i64* %addr) ret void } -define void @st2d_f64( %v0, %v1, %pred, * %addr) { +define void @st2d_f64( %v0, %v1, %pred, double* %addr) { ; CHECK-LABEL: st2d_f64: ; CHECK: st2d { z0.d, z1.d }, p0, [x0] ; CHECK-NEXT: ret call void @llvm.aarch64.sve.st2.nxv2f64( %v0, %v1, %pred, - * %addr) + double* %addr) ret void } @@ -97,7 +97,7 @@ ; ST3B ; -define void @st3b_i8( %v0, %v1, %v2, %pred, * %addr) { +define void @st3b_i8( %v0, %v1, %v2, %pred, i8* %addr) { ; CHECK-LABEL: st3b_i8: ; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0] ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ %v1, %v2, %pred, - * %addr) + i8* %addr) ret void } @@ -113,7 +113,7 @@ ; ST3H ; -define void @st3h_i16( %v0, %v1, %v2, %pred, * %addr) { +define void @st3h_i16( %v0, %v1, %v2, %pred, i16* %addr) { ; CHECK-LABEL: st3h_i16: ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0] ; CHECK-NEXT: ret @@ -121,11 +121,11 @@ %v1, %v2, %pred, - * %addr) + i16* %addr) ret void } -define void @st3h_f16( %v0, %v1, %v2, %pred, * %addr) { +define void @st3h_f16( %v0, %v1, %v2, %pred, half* %addr) { ; CHECK-LABEL: st3h_f16: ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0] ; CHECK-NEXT: ret @@ -133,7 +133,7 @@ %v1, %v2, %pred, - * %addr) + half* %addr) ret void } @@ -141,7 +141,7 @@ ; ST3W ; -define void @st3w_i32( %v0, %v1, %v2, %pred, * %addr) { +define void @st3w_i32( %v0, %v1, %v2, %pred, i32* %addr) { ; CHECK-LABEL: st3w_i32: ; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0] ; CHECK-NEXT: ret @@ -149,11 +149,11 @@ %v1, %v2, %pred, - * %addr) + i32* %addr) ret void } -define void @st3w_f32( 
%v0, %v1, %v2, %pred, * %addr) { +define void @st3w_f32( %v0, %v1, %v2, %pred, float* %addr) { ; CHECK-LABEL: st3w_f32: ; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0] ; CHECK-NEXT: ret @@ -161,7 +161,7 @@ %v1, %v2, %pred, - * %addr) + float* %addr) ret void } @@ -169,7 +169,7 @@ ; ST3D ; -define void @st3d_i64( %v0, %v1, %v2, %pred, * %addr) { +define void @st3d_i64( %v0, %v1, %v2, %pred, i64* %addr) { ; CHECK-LABEL: st3d_i64: ; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0] ; CHECK-NEXT: ret @@ -177,11 +177,11 @@ %v1, %v2, %pred, - * %addr) + i64* %addr) ret void } -define void @st3d_f64( %v0, %v1, %v2, %pred, * %addr) { +define void @st3d_f64( %v0, %v1, %v2, %pred, double* %addr) { ; CHECK-LABEL: st3d_f64: ; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0] ; CHECK-NEXT: ret @@ -189,7 +189,7 @@ %v1, %v2, %pred, - * %addr) + double* %addr) ret void } @@ -197,7 +197,7 @@ ; ST4B ; -define void @st4b_i8( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4b_i8( %v0, %v1, %v2, %v3, %pred, i8* %addr) { ; CHECK-LABEL: st4b_i8: ; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0] ; CHECK-NEXT: ret @@ -206,7 +206,7 @@ %v2, %v3, %pred, - * %addr) + i8* %addr) ret void } @@ -214,7 +214,7 @@ ; ST4H ; -define void @st4h_i16( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4h_i16( %v0, %v1, %v2, %v3, %pred, i16* %addr) { ; CHECK-LABEL: st4h_i16: ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0] ; CHECK-NEXT: ret @@ -223,11 +223,11 @@ %v2, %v3, %pred, - * %addr) + i16* %addr) ret void } -define void @st4h_f16( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4h_f16( %v0, %v1, %v2, %v3, %pred, half* %addr) { ; CHECK-LABEL: st4h_f16: ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0] ; CHECK-NEXT: ret @@ -236,7 +236,7 @@ %v2, %v3, %pred, - * %addr) + half* %addr) ret void } @@ -244,7 +244,7 @@ ; ST4W ; -define void @st4w_i32( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4w_i32( %v0, %v1, %v2, %v3, %pred, i32* %addr) { ; CHECK-LABEL: st4w_i32: ; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0] ; CHECK-NEXT: ret @@ -253,11 +253,11 @@ %v2, %v3, %pred, - * %addr) + i32* %addr) ret void } -define void @st4w_f32( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4w_f32( %v0, %v1, %v2, %v3, %pred, float* %addr) { ; CHECK-LABEL: st4w_f32: ; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0] ; CHECK-NEXT: ret @@ -266,7 +266,7 @@ %v2, %v3, %pred, - * %addr) + float* %addr) ret void } @@ -274,7 +274,7 @@ ; ST4D ; -define void @st4d_i64( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4d_i64( %v0, %v1, %v2, %v3, %pred, i64* %addr) { ; CHECK-LABEL: st4d_i64: ; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0] ; CHECK-NEXT: ret @@ -283,11 +283,11 @@ %v2, %v3, %pred, - * %addr) + i64* %addr) ret void } -define void @st4d_f64( %v0, %v1, %v2, %v3, %pred, * %addr) { +define void @st4d_f64( %v0, %v1, %v2, %v3, %pred, double* %addr) { ; CHECK-LABEL: st4d_f64: ; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0] ; CHECK-NEXT: ret @@ -296,7 +296,7 @@ %v2, %v3, %pred, - * %addr) + double* %addr) ret void } @@ -387,29 +387,29 @@ } -declare void @llvm.aarch64.sve.st2.nxv16i8(, , , *) -declare void @llvm.aarch64.sve.st2.nxv8i16(, , , *) -declare void @llvm.aarch64.sve.st2.nxv4i32(, , , *) -declare void @llvm.aarch64.sve.st2.nxv2i64(, , , *) -declare void @llvm.aarch64.sve.st2.nxv8f16(, , , *) -declare void @llvm.aarch64.sve.st2.nxv4f32(, , , *) -declare void @llvm.aarch64.sve.st2.nxv2f64(, , , *) - -declare void @llvm.aarch64.sve.st3.nxv16i8(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv8i16(, , , , *) -declare 
void @llvm.aarch64.sve.st3.nxv4i32(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv2i64(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv8f16(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv4f32(, , , , *) -declare void @llvm.aarch64.sve.st3.nxv2f64(, , , , *) - -declare void @llvm.aarch64.sve.st4.nxv16i8(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv8i16(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv4i32(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv2i64(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv8f16(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv4f32(, , , , , *) -declare void @llvm.aarch64.sve.st4.nxv2f64(, , , , , *) +declare void @llvm.aarch64.sve.st2.nxv16i8(, , , i8*) +declare void @llvm.aarch64.sve.st2.nxv8i16(, , , i16*) +declare void @llvm.aarch64.sve.st2.nxv4i32(, , , i32*) +declare void @llvm.aarch64.sve.st2.nxv2i64(, , , i64*) +declare void @llvm.aarch64.sve.st2.nxv8f16(, , , half*) +declare void @llvm.aarch64.sve.st2.nxv4f32(, , , float*) +declare void @llvm.aarch64.sve.st2.nxv2f64(, , , double*) + +declare void @llvm.aarch64.sve.st3.nxv16i8(, , , , i8*) +declare void @llvm.aarch64.sve.st3.nxv8i16(, , , , i16*) +declare void @llvm.aarch64.sve.st3.nxv4i32(, , , , i32*) +declare void @llvm.aarch64.sve.st3.nxv2i64(, , , , i64*) +declare void @llvm.aarch64.sve.st3.nxv8f16(, , , , half*) +declare void @llvm.aarch64.sve.st3.nxv4f32(, , , , float*) +declare void @llvm.aarch64.sve.st3.nxv2f64(, , , , double*) + +declare void @llvm.aarch64.sve.st4.nxv16i8(, , , , , i8*) +declare void @llvm.aarch64.sve.st4.nxv8i16(, , , , , i16*) +declare void @llvm.aarch64.sve.st4.nxv4i32(, , , , , i32*) +declare void @llvm.aarch64.sve.st4.nxv2i64(, , , , , i64*) +declare void @llvm.aarch64.sve.st4.nxv8f16(, , , , , half*) +declare void @llvm.aarch64.sve.st4.nxv4f32(, , , , , float*) +declare void @llvm.aarch64.sve.st4.nxv2f64(, , , , , double*) declare void @llvm.aarch64.sve.stnt1.nxv16i8(, , i8*) declare void @llvm.aarch64.sve.stnt1.nxv8i16(, , i16*)
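For reference, a minimal sketch of the IR shape implied by the updated intrinsic signatures, derived from the ld2b_i8 test above (the function name @ld2_example is illustrative): the struct load/store intrinsics now take a plain pointer to the element type, e.g. i8*, rather than a pointer to the scalable vector type.

declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)

define <vscale x 32 x i8> @ld2_example(<vscale x 16 x i1> %pred, i8* %addr) {
  ; Loads two consecutive scalable vectors from %addr under predicate %pred.
  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
  ret <vscale x 32 x i8> %res
}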