diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1426,6 +1426,11 @@
 def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
 def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
 
+def int_aarch64_sve_sqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
+
 // Shifts
 
 def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -146,12 +146,12 @@
   def SETFFR : sve_int_setffr<"setffr", int_aarch64_sve_setffr>;
   def WRFFR  : sve_int_wrffr<"wrffr", int_aarch64_sve_wrffr>;
 
-  defm ADD_ZZZ   : sve_int_bin_cons_arit_0<0b000, "add", add>;
-  defm SUB_ZZZ   : sve_int_bin_cons_arit_0<0b001, "sub", sub>;
-  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", usubsat>;
+  defm ADD_ZZZ   : sve_int_bin_cons_arit_0<0b000, "add", add, null_frag>;
+  defm SUB_ZZZ   : sve_int_bin_cons_arit_0<0b001, "sub", sub, null_frag>;
+  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", saddsat, int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", uaddsat, int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", ssubsat, int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", usubsat, int_aarch64_sve_uqsub_x>;
 
   defm AND_ZZZ : sve_int_bin_cons_log<0b00, "and", and>;
   defm ORR_ZZZ : sve_int_bin_cons_log<0b01, "orr", or>;
@@ -167,13 +167,13 @@
   defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and", int_aarch64_sve_and>;
   defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic", int_aarch64_sve_bic>;
 
-  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
-  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
+  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add, null_frag>;
+  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub, null_frag>;
   defm SUBR_ZI  : sve_int_arith_imm0_subr<0b011, "subr", sub>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat, int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat, int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat, int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat, int_aarch64_sve_uqsub_x>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -1485,7 +1485,8 @@
   let Inst{4-0} = Zd;
 }
 
-multiclass sve_int_bin_cons_arit_0<bits<3> opc, string asm, SDPatternOperator op> {
+multiclass sve_int_bin_cons_arit_0<bits<3> opc, string asm,
+                                   SDPatternOperator op, SDPatternOperator int_op> {
   def _B : sve_int_bin_cons_arit_0<0b00, opc, asm, ZPR8>;
   def _H : sve_int_bin_cons_arit_0<0b01, opc, asm, ZPR16>;
   def _S : sve_int_bin_cons_arit_0<0b10, opc, asm, ZPR32>;
@@ -1495,6 +1496,12 @@
   def : SVE_2_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
   def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
   def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+
+  // Intrinsic version
+  def : SVE_2_Op_Pat<nxv16i8, int_op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i16, int_op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i32, int_op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i64, int_op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -3776,7 +3783,8 @@
   let ElementSize = ElementSizeNone;
 }
 
-multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op> {
+multiclass sve_int_arith_imm0<bits<3> opc, string asm,
+                              SDPatternOperator op, SDPatternOperator int_op> {
   def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8,  addsub_imm8_opt_lsl_i8>;
   def _H : sve_int_arith_imm0<0b01, opc, asm, ZPR16, addsub_imm8_opt_lsl_i16>;
   def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
@@ -3786,6 +3794,12 @@
   def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
   def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
   def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+
+  // Intrinsic version
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, int_op, ZPR8,  i32, SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, int_op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, int_op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, int_op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_arith_imm0_subr<bits<3> opc, string asm, SDPatternOperator op> {
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
@@ -0,0 +1,338 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_b_lowimm:
+; CHECK: sqadd z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_lowimm:
+; CHECK: sqadd z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_highimm:
+; CHECK: sqadd z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_lowimm:
+; CHECK: sqadd z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_highimm:
+; CHECK: sqadd z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_lowimm:
+; CHECK: sqadd z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_highimm:
+; CHECK: sqadd z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_b_lowimm:
+; CHECK: sqsub z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_lowimm:
+; CHECK: sqsub z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_highimm:
+; CHECK: sqsub z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_lowimm:
+; CHECK: sqsub z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_highimm:
+; CHECK: sqsub z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_lowimm:
+; CHECK: sqsub z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_highimm:
+; CHECK: sqsub z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_b_lowimm:
+; CHECK: uqadd z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_lowimm:
+; CHECK: uqadd z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_highimm:
+; CHECK: uqadd z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_lowimm:
+; CHECK: uqadd z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_highimm:
+; CHECK: uqadd z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_lowimm:
+; CHECK: uqadd z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_highimm:
+; CHECK: uqadd z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_b_lowimm:
+; CHECK: uqsub z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_lowimm:
+; CHECK: uqsub z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_highimm:
+; CHECK: uqsub z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_lowimm:
+; CHECK: uqsub z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_highimm:
+; CHECK: uqsub z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_lowimm:
+; CHECK: uqsub z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_highimm:
+; CHECK: uqsub z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -134,6 +134,82 @@
   ret <vscale x 2 x i64> %out
 }
 
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqadd_i8:
+; CHECK: sqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqadd_i16:
+; CHECK: sqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqadd_i32:
+; CHECK: sqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqadd_i64:
+; CHECK: sqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqsub_i8:
+; CHECK: sqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqsub_i16:
+; CHECK: sqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqsub_i32:
+; CHECK: sqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqsub_i64:
+; CHECK: sqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 ; UDOT
 
 define <vscale x 4 x i32> @udot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
@@ -169,6 +245,82 @@
   ret <vscale x 2 x i64> %out
 }
 
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqadd_i8:
+; CHECK: uqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqadd_i16:
+; CHECK: uqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqadd_i32:
+; CHECK: uqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqadd_i64:
+; CHECK: uqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqsub_i8:
+; CHECK: uqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqsub_i16:
+; CHECK: uqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqsub_i32:
+; CHECK: uqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqsub_i64:
+; CHECK: uqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
 declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
@@ -185,8 +337,28 @@
 declare <vscale x 4 x i32> @llvm.aarch64.sve.sdot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.sdot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
 
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)