Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1426,6 +1426,11 @@
 def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
 def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
 
+def int_aarch64_sve_sqadd_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqsub_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqadd_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsub_x : AdvSIMD_2VectorArg_Intrinsic;
+
 // Shifts
 
 def int_aarch64_sve_asr : AdvSIMD_Pred2VectorArg_Intrinsic;
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -148,10 +148,30 @@
   defm ADD_ZZZ   : sve_int_bin_cons_arit_0<0b000, "add", add>;
   defm SUB_ZZZ   : sve_int_bin_cons_arit_0<0b001, "sub", sub>;
-  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;
+
+  def : Pat<(nxv16i8 (saddsat nxv16i8:$Op1, nxv16i8:$Op2)), (SQADD_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (saddsat nxv8i16:$Op1, nxv8i16:$Op2)), (SQADD_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (saddsat nxv4i32:$Op1, nxv4i32:$Op2)), (SQADD_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (saddsat nxv2i64:$Op1, nxv2i64:$Op2)), (SQADD_ZZZ_D $Op1, $Op2)>;
+
+  def : Pat<(nxv16i8 (uaddsat nxv16i8:$Op1, nxv16i8:$Op2)), (UQADD_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (uaddsat nxv8i16:$Op1, nxv8i16:$Op2)), (UQADD_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (uaddsat nxv4i32:$Op1, nxv4i32:$Op2)), (UQADD_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (uaddsat nxv2i64:$Op1, nxv2i64:$Op2)), (UQADD_ZZZ_D $Op1, $Op2)>;
+
+  def : Pat<(nxv16i8 (ssubsat nxv16i8:$Op1, nxv16i8:$Op2)), (SQSUB_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (ssubsat nxv8i16:$Op1, nxv8i16:$Op2)), (SQSUB_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (ssubsat nxv4i32:$Op1, nxv4i32:$Op2)), (SQSUB_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (ssubsat nxv2i64:$Op1, nxv2i64:$Op2)), (SQSUB_ZZZ_D $Op1, $Op2)>;
+
+  def : Pat<(nxv16i8 (usubsat nxv16i8:$Op1, nxv16i8:$Op2)), (UQSUB_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (usubsat nxv8i16:$Op1, nxv8i16:$Op2)), (UQSUB_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (usubsat nxv4i32:$Op1, nxv4i32:$Op2)), (UQSUB_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (usubsat nxv2i64:$Op1, nxv2i64:$Op2)), (UQSUB_ZZZ_D $Op1, $Op2)>;
 
   defm AND_ZZZ : sve_int_bin_cons_log<0b00, "and", and>;
   defm ORR_ZZZ : sve_int_bin_cons_log<0b01, "orr", or>;
@@ -170,10 +190,36 @@
   defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
   defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
   defm SUBR_ZI  : sve_int_arith_imm0_subr<0b011, "subr", sub>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;
+
+  multiclass qaddsub_imm<ValueType Ty, ValueType ImmTy, SDPatternOperator Op,
+                         ZPRRegOp zprty, ComplexPattern cpx, Instruction Inst> {
+    def _default : Pat<(Ty (Op (Ty zprty:$Op1), (Ty (AArch64dup (ImmTy (cpx i32:$imm, i32:$shift)))))),
+                       (Inst $Op1, i32:$imm, i32:$shift)>;
+  }
+
+  defm Pat_SQADD_ZI_B : qaddsub_imm<nxv16i8, i32, saddsat, ZPR8,  SVEAddSubImm8Pat,  SQADD_ZI_B>;
+  defm Pat_SQADD_ZI_H : qaddsub_imm<nxv8i16, i32, saddsat, ZPR16, SVEAddSubImm16Pat, SQADD_ZI_H>;
+  defm Pat_SQADD_ZI_S : qaddsub_imm<nxv4i32, i32, saddsat, ZPR32, SVEAddSubImm32Pat, SQADD_ZI_S>;
+  defm Pat_SQADD_ZI_D : qaddsub_imm<nxv2i64, i64, saddsat, ZPR64, SVEAddSubImm64Pat, SQADD_ZI_D>;
+
+  defm Pat_UQADD_ZI_B : qaddsub_imm<nxv16i8, i32, uaddsat, ZPR8,  SVEAddSubImm8Pat,  UQADD_ZI_B>;
+  defm Pat_UQADD_ZI_H : qaddsub_imm<nxv8i16, i32, uaddsat, ZPR16, SVEAddSubImm16Pat, UQADD_ZI_H>;
+  defm Pat_UQADD_ZI_S : qaddsub_imm<nxv4i32, i32, uaddsat, ZPR32, SVEAddSubImm32Pat, UQADD_ZI_S>;
+  defm Pat_UQADD_ZI_D : qaddsub_imm<nxv2i64, i64, uaddsat, ZPR64, SVEAddSubImm64Pat, UQADD_ZI_D>;
+
+  defm Pat_SQSUB_ZI_B : qaddsub_imm<nxv16i8, i32, ssubsat, ZPR8,  SVEAddSubImm8Pat,  SQSUB_ZI_B>;
+  defm Pat_SQSUB_ZI_H : qaddsub_imm<nxv8i16, i32, ssubsat, ZPR16, SVEAddSubImm16Pat, SQSUB_ZI_H>;
+  defm Pat_SQSUB_ZI_S : qaddsub_imm<nxv4i32, i32, ssubsat, ZPR32, SVEAddSubImm32Pat, SQSUB_ZI_S>;
+  defm Pat_SQSUB_ZI_D : qaddsub_imm<nxv2i64, i64, ssubsat, ZPR64, SVEAddSubImm64Pat, SQSUB_ZI_D>;
+
+  defm Pat_UQSUB_ZI_B : qaddsub_imm<nxv16i8, i32, usubsat, ZPR8,  SVEAddSubImm8Pat,  UQSUB_ZI_B>;
+  defm Pat_UQSUB_ZI_H : qaddsub_imm<nxv8i16, i32, usubsat, ZPR16, SVEAddSubImm16Pat, UQSUB_ZI_H>;
+  defm Pat_UQSUB_ZI_S : qaddsub_imm<nxv4i32, i32, usubsat, ZPR32, SVEAddSubImm32Pat, UQSUB_ZI_S>;
+  defm Pat_UQSUB_ZI_D : qaddsub_imm<nxv2i64, i64, usubsat, ZPR64, SVEAddSubImm64Pat, UQSUB_ZI_D>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
@@ -0,0 +1,338 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_b_lowimm:
+; CHECK: sqadd z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_lowimm:
+; CHECK: sqadd z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_highimm:
+; CHECK: sqadd z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_lowimm:
+; CHECK: sqadd z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_highimm:
+; CHECK: sqadd z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_lowimm:
+; CHECK: sqadd z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_highimm:
+; CHECK: sqadd z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_b_lowimm:
+; CHECK: sqsub z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_lowimm:
+; CHECK: sqsub z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_highimm:
+; CHECK: sqsub z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_lowimm:
+; CHECK: sqsub z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_highimm:
+; CHECK: sqsub z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_lowimm:
+; CHECK: sqsub z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_highimm:
+; CHECK: sqsub z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_b_lowimm:
+; CHECK: uqadd z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_lowimm:
+; CHECK: uqadd z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_highimm:
+; CHECK: uqadd z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_lowimm:
+; CHECK: uqadd z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_b_lowimm:
+; CHECK: uqsub z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_lowimm:
+; CHECK: uqsub z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_highimm:
+; CHECK: uqsub z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_lowimm:
+; CHECK: uqsub z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_highimm:
+; CHECK: uqsub z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_lowimm:
+; CHECK: uqsub z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_highimm:
+; CHECK: uqsub z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+
+define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_highimm:
+; CHECK: uqadd z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_lowimm:
+; CHECK: uqadd z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_highimm:
+; CHECK: uqadd z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -134,6 +134,82 @@
   ret <vscale x 2 x i64> %out
 }
 
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqadd_i8:
+; CHECK: sqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqadd_i16:
+; CHECK: sqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqadd_i32:
+; CHECK: sqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqadd_i64:
+; CHECK: sqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqsub_i8:
+; CHECK: sqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqsub_i16:
+; CHECK: sqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqsub_i32:
+; CHECK: sqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqsub_i64:
+; CHECK: sqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 ; UDOT
 
 define <vscale x 4 x i32> @udot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
@@ -169,6 +245,82 @@
   ret <vscale x 2 x i64> %out
 }
 
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqadd_i8:
+; CHECK: uqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqadd_i16:
+; CHECK: uqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqadd_i32:
+; CHECK: uqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqadd_i64:
+; CHECK: uqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqsub_i8:
+; CHECK: uqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqsub_i16:
+; CHECK: uqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqsub_i32:
+; CHECK: uqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqsub_i64:
+; CHECK: uqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
 declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
@@ -185,8 +337,28 @@
 declare <vscale x 4 x i32> @llvm.aarch64.sve.sdot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.sdot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
 
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)