diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1069,12 +1069,6 @@
                 ],
                 [IntrReadMem, IntrArgMemOnly]>;
 
-class AdvSIMD_1VectorArg_Imm_Intrinsic
-  : Intrinsic<[llvm_anyvector_ty],
-              [LLVMMatchType<0>,
-               llvm_i32_ty],
-              [IntrNoMem, ImmArg<1>]>;
-
 class AdvSIMD_ScatterStore_64bitOffset_Intrinsic
     : Intrinsic<[],
                [
@@ -1104,12 +1098,6 @@
                ],
                [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;
 
-class AdvSIMD_1VectorArg_Imm64_Intrinsic
-  : Intrinsic<[llvm_anyvector_ty],
-              [LLVMMatchType<0>,
-               llvm_i64_ty],
-              [IntrNoMem, ImmArg<1>]>;
-
 //
 // Loads
 //
@@ -1130,14 +1118,6 @@
 def int_aarch64_sve_sub    : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_subr   : AdvSIMD_Pred2VectorArg_Intrinsic;
 
-def int_aarch64_sve_add_imm   : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_sub_imm   : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_subr_imm  : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_sqadd_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_uqadd_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_sqsub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_uqsub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
-
 def int_aarch64_sve_mul        : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_smulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_umulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1277,10 +1257,6 @@
 def int_aarch64_sve_nors  : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nands : AdvSIMD_Pred2VectorArg_Intrinsic;
 
-def int_aarch64_sve_orr_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
-def int_aarch64_sve_eor_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
-def int_aarch64_sve_and_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
-
 //
 // Conversion
 //
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -93,13 +93,13 @@
   defm AND_ZPmZ  : sve_int_bin_pred_log<0b010, "and", int_aarch64_sve_and>;
   defm BIC_ZPmZ  : sve_int_bin_pred_log<0b011, "bic", int_aarch64_sve_bic>;
 
-  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", int_aarch64_sve_add_imm>;
-  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", int_aarch64_sve_sub_imm>;
-  defm SUBR_ZI  : sve_int_arith_imm0<0b011, "subr", int_aarch64_sve_subr_imm>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_imm>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_imm>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_imm>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_imm>;
+  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
+  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
+  defm SUBR_ZI  : sve_int_arith_imm0_subr<0b011, "subr", sub>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
@@ -117,9 +117,9 @@
   defm EORV_VPZ  : sve_int_reduce_2<0b001, "eorv", AArch64eorv_pred>;
   defm ANDV_VPZ  : sve_int_reduce_2<0b010, "andv", AArch64andv_pred>;
 
-  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", int_aarch64_sve_orr_imm>;
-  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", int_aarch64_sve_eor_imm>;
-  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", int_aarch64_sve_and_imm>;
+  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", or>;
+  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
+  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;
 
   defm SMAX_ZI   : sve_int_arith_imm1<0b00, "smax", simm8>;
   defm SMIN_ZI   : sve_int_arith_imm1<0b10, "smin", simm8>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -299,14 +299,19 @@
 : Pat<(vtd (op vt1:$Op1)),
       (inst $Op1)>;
 
+class SVE_1_Op_Imm_OptLsl_Reverse_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
+                                      ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))), (vt zprty:$Op1))),
+        (inst $Op1, i32:$imm, i32:$shift)>;
+
 class SVE_1_Op_Imm_OptLsl_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                              ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt zprty:$Op1), (i32 (cpx i32:$imm, i32:$shift)))),
+                              ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))))),
         (inst $Op1, i32:$imm, i32:$shift)>;
 
 class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                           ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt zprty:$Op1), (i64 (cpx i64:$imm)))),
+                           ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
         (inst $Op1, i64:$imm)>;
 
 class SVE_2_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
@@ -1342,10 +1347,10 @@
 multiclass sve_int_log_imm<bits<2> opc, string asm, string alias, SDPatternOperator op> {
   def NAME : sve_int_log_imm<opc, asm, ZPR64, logical_imm64>;
 
-  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8,  SVELogicalImm8Pat,  !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8,  i32, SVELogicalImm8Pat,  !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, i32, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, i32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, i64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;
 
   def : InstAlias<asm # "\t$Zdn, $Zdn, $imm",
                   (!cast<Instruction>(NAME) ZPR8:$Zdn, sve_logical_imm8:$imm), 4>;
@@ -3320,10 +3325,22 @@
   def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
   def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;
 
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8,  SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8,  i32, SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+}
+
+multiclass sve_int_arith_imm0_subr<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8,  addsub_imm8_opt_lsl_i8>;
+  def _H : sve_int_arith_imm0<0b01, opc, asm, ZPR16, addsub_imm8_opt_lsl_i16>;
+  def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
+  def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;
+
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv16i8, op, ZPR8,  i32, SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_arith_imm<bits<2> sz8_64, bits<6> opc, string asm,
diff --git a/llvm/test/CodeGen/AArch64/sve-int-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-int-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
@@ -1,471 +1,519 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 16 x i8> @add_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: add_imm_i8_low
+;
+; SVE Arith Vector Immediate Unpredicated CodeGen
+;
+
+; ADD
+define <vscale x 16 x i8> @add_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_i8_low
 ; CHECK: add z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = add <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @add_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: add_imm_i16_low
+define <vscale x 8 x i16> @add_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_i16_low
 ; CHECK: add z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = add <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @add_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: add_imm_i16_high
+define <vscale x 8 x i16> @add_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_i16_high
 ; CHECK: add z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = add <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @add_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: add_imm_i32_low
+define <vscale x 4 x i32> @add_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_i32_low
 ; CHECK: add z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = add <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @add_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: add_imm_i32_high
+define <vscale x 4 x i32> @add_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_i32_high
 ; CHECK: add z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = add <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @add_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: add_imm_i64_low
+define <vscale x 2 x i64> @add_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_i64_low
 ; CHECK: add z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = add <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @add_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: add_imm_i64_high
+define <vscale x 2 x i64> @add_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_i64_high
 ; CHECK: add z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = add <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @sub_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sub_imm_i8_low
-; CHECK: sub z0.b, z0.b, #30
+; SUBR
+define <vscale x 16 x i8> @subr_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: subr_i8_low
+; CHECK: subr z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = sub <vscale x 16 x i8> %splat, %a
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sub_imm_i16_low
-; CHECK: sub z0.h, z0.h, #30
+define <vscale x 8 x i16> @subr_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_i16_low
+; CHECK: subr z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = sub <vscale x 8 x i16> %splat, %a
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @sub_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sub_imm_i16_high
-; CHECK: sub z0.h, z0.h, #1024
+define <vscale x 8 x i16> @subr_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_i16_high
+; CHECK: subr z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = sub <vscale x 8 x i16> %splat, %a
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sub_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sub_imm_i32_low
-; CHECK: sub z0.s, z0.s, #30
+define <vscale x 4 x i32> @subr_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_i32_low
+; CHECK: subr z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = sub <vscale x 4 x i32> %splat, %a
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @sub_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sub_imm_i32_high
-; CHECK: sub z0.s, z0.s, #1024
+define <vscale x 4 x i32> @subr_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_i32_high
+; CHECK: subr z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = sub <vscale x 4 x i32> %splat, %a
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sub_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sub_imm_i64_low
-; CHECK: sub z0.d, z0.d, #30
+define <vscale x 2 x i64> @subr_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_i64_low
+; CHECK: subr z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = sub <vscale x 2 x i64> %splat, %a
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @sub_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sub_imm_i64_high
-; CHECK: sub z0.d, z0.d, #1024
+define <vscale x 2 x i64> @subr_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_i64_high
+; CHECK: subr z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = sub <vscale x 2 x i64> %splat, %a
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @subr_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: subr_imm_i8_low
-; CHECK: subr z0.b, z0.b, #30
+; SUB
+define <vscale x 16 x i8> @sub_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_i8_low
+; CHECK: sub z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = sub <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @subr_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: subr_imm_i16_low
-; CHECK: subr z0.h, z0.h, #30
+define <vscale x 8 x i16> @sub_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_i16_low
+; CHECK: sub z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = sub <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @subr_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: subr_imm_i16_high
-; CHECK: subr z0.h, z0.h, #1024
+define <vscale x 8 x i16> @sub_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_i16_high
+; CHECK: sub z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = sub <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @subr_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: subr_imm_i32_low
-; CHECK: subr z0.s, z0.s, #30
+define <vscale x 4 x i32> @sub_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_i32_low
+; CHECK: sub z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = sub <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @subr_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: subr_imm_i32_high
-; CHECK: subr z0.s, z0.s, #1024
+define <vscale x 4 x i32> @sub_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_i32_high
+; CHECK: sub z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = sub <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @subr_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: subr_imm_i64_low
-; CHECK: subr z0.d, z0.d, #30
+define <vscale x 2 x i64> @sub_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_i64_low
+; CHECK: sub z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = sub <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @subr_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: subr_imm_i64_high
-; CHECK: subr z0.d, z0.d, #1024
+define <vscale x 2 x i64> @sub_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_i64_high
+; CHECK: sub z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = sub <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @sqadd_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqadd_imm_i8_low
+; SQADD
+define <vscale x 16 x i8> @sqadd_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_i8_low
 ; CHECK: sqadd z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sqadd_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_imm_i16_low
+define <vscale x 8 x i16> @sqadd_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_i16_low
 ; CHECK: sqadd z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @sqadd_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_imm_i16_high
+define <vscale x 8 x i16> @sqadd_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_i16_high
 ; CHECK: sqadd z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sqadd_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_imm_i32_low
+define <vscale x 4 x i32> @sqadd_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_low
 ; CHECK: sqadd z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @sqadd_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_imm_i32_high
+define <vscale x 4 x i32> @sqadd_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_high
 ; CHECK: sqadd z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sqadd_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_imm_i64_low
+define <vscale x 2 x i64> @sqadd_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_i64_low
 ; CHECK: sqadd z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @sqadd_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_imm_i64_high
+define <vscale x 2 x i64> @sqadd_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_i64_high
 ; CHECK: sqadd z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @uqadd_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqadd_imm_i8_low
+; UQADD
+define <vscale x 16 x i8> @uqadd_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_i8_low
 ; CHECK: uqadd z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @uqadd_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_imm_i16_low
+define <vscale x 8 x i16> @uqadd_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_i16_low
 ; CHECK: uqadd z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @uqadd_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_imm_i16_high
+define <vscale x 8 x i16> @uqadd_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_i16_high
 ; CHECK: uqadd z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @uqadd_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_imm_i32_low
+define <vscale x 4 x i32> @uqadd_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_i32_low
 ; CHECK: uqadd z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @uqadd_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_imm_i32_high
+define <vscale x 4 x i32> @uqadd_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_i32_high
 ; CHECK: uqadd z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @uqadd_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_imm_i64_low
+define <vscale x 2 x i64> @uqadd_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_i64_low
 ; CHECK: uqadd z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @uqadd_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_imm_i64_high
+define <vscale x 2 x i64> @uqadd_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_i64_high
 ; CHECK: uqadd z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @sqsub_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqsub_imm_i8_low
+; SQSUB
+define <vscale x 16 x i8> @sqsub_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_i8_low
 ; CHECK: sqsub z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sqsub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_imm_i16_low
+define <vscale x 8 x i16> @sqsub_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_i16_low
 ; CHECK: sqsub z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @sqsub_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_imm_i16_high
+define <vscale x 8 x i16> @sqsub_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_i16_high
 ; CHECK: sqsub z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sqsub_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_imm_i32_low
+define <vscale x 4 x i32> @sqsub_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_i32_low
 ; CHECK: sqsub z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @sqsub_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_imm_i32_high
+define <vscale x 4 x i32> @sqsub_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_i32_high
 ; CHECK: sqsub z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sqsub_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_imm_i64_low
+define <vscale x 2 x i64> @sqsub_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_low
 ; CHECK: sqsub z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @sqsub_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_imm_i64_high
+define <vscale x 2 x i64> @sqsub_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_high
 ; CHECK: sqsub z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @uqsub_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqsub_imm_i8_low
+; UQSUB
+define <vscale x 16 x i8> @uqsub_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_i8_low
 ; CHECK: uqsub z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @uqsub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_imm_i16_low
+define <vscale x 8 x i16> @uqsub_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_i16_low
 ; CHECK: uqsub z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @uqsub_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_imm_i16_high
+define <vscale x 8 x i16> @uqsub_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_i16_high
 ; CHECK: uqsub z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @uqsub_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_imm_i32_low
+define <vscale x 4 x i32> @uqsub_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_i32_low
 ; CHECK: uqsub z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @uqsub_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_imm_i32_high
+define <vscale x 4 x i32> @uqsub_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_i32_high
 ; CHECK: uqsub z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @uqsub_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_imm_i64_low
+define <vscale x 2 x i64> @uqsub_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_i64_low
 ; CHECK: uqsub z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @uqsub_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_imm_i64_high
+define <vscale x 2 x i64> @uqsub_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_i64_high
 ; CHECK: uqsub z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
diff --git a/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
@@ -1,11 +1,17 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
+;
+; SVE Logical Vector Immediate Unpredicated CodeGen
+;
+
+; ORR
 define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: orr_i8:
 ; CHECK: orr z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   i64 15)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = or <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
@@ -13,8 +19,9 @@
 ; CHECK-LABEL: orr_i16:
 ; CHECK: orr z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i64 64519)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = or <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
@@ -22,8 +29,9 @@
 ; CHECK-LABEL: orr_i32:
 ; CHECK: orr z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i64 16776960)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = or <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
@@ -31,17 +39,20 @@
 ; CHECK-LABEL: orr_i64:
 ; CHECK: orr z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i64 18445618173802708992)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = or <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
+; EOR
 define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: eor_i8:
 ; CHECK: eor z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   i64 15)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = xor <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
@@ -49,8 +60,9 @@
 ; CHECK-LABEL: eor_i16:
 ; CHECK: eor z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i64 64519)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = xor <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
@@ -58,8 +70,9 @@
 ; CHECK-LABEL: eor_i32:
 ; CHECK: eor z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i64 16776960)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = xor <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
@@ -67,17 +80,20 @@
 ; CHECK-LABEL: eor_i64:
 ; CHECK: eor z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i64 18445618173802708992)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = xor <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
+; AND
 define <vscale x 16 x i8> @and_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: and_i8:
 ; CHECK: and z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   i64 15)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = and <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
@@ -85,8 +101,9 @@
 ; CHECK-LABEL: and_i16:
 ; CHECK: and z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   i64 64519)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = and <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
@@ -94,8 +111,9 @@
 ; CHECK-LABEL: and_i32:
 ; CHECK: and z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   i64 16776960)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = and <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
@@ -103,20 +121,8 @@
 ; CHECK-LABEL: and_i64:
 ; CHECK: and z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i64 18445618173802708992)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = and <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
-
-declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8>, i64)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64>, i64)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8>, i64)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64>, i64)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8>, i64)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64>, i64)