Index: include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- include/llvm/IR/IntrinsicsAArch64.td
+++ include/llvm/IR/IntrinsicsAArch64.td
@@ -748,3 +748,22 @@
       Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;
 }
+
+//===----------------------------------------------------------------------===//
+// SVE
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_Merged1VectorArg_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>,
+                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMMatchType<0>],
+                [IntrNoMem]>;
+
+//
+// Integer arithmetic
+//
+
+def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
+}
Index: lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -94,8 +94,8 @@
   defm UXTH_ZPmZ : sve_int_un_pred_arit_0_w<0b011, "uxth">;
   defm SXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b100, "sxtw">;
   defm UXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b101, "uxtw">;
-  defm ABS_ZPmZ  : sve_int_un_pred_arit_0< 0b110, "abs">;
-  defm NEG_ZPmZ  : sve_int_un_pred_arit_0< 0b111, "neg">;
+  defm ABS_ZPmZ  : sve_int_un_pred_arit_0< 0b110, "abs", int_aarch64_sve_abs>;
+  defm NEG_ZPmZ  : sve_int_un_pred_arit_0< 0b111, "neg", int_aarch64_sve_neg>;
   defm CLS_ZPmZ  : sve_int_un_pred_arit_1< 0b000, "cls">;
   defm CLZ_ZPmZ  : sve_int_un_pred_arit_1< 0b001, "clz">;
Index: lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- lib/Target/AArch64/SVEInstrFormats.td
+++ lib/Target/AArch64/SVEInstrFormats.td
@@ -279,6 +279,14 @@
   defm PTRUES : sve_int_ptrue<0b001, "ptrues">;
 }
 
+//===----------------------------------------------------------------------===//
+// SVE pattern match helpers.
+//===----------------------------------------------------------------------===//
+
+class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                   ValueType vt2, ValueType vt3, Instruction inst>
+: Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
+      (inst $Op1, $Op2, $Op3)>;
 
 //===----------------------------------------------------------------------===//
 // SVE Predicate Misc Group
@@ -2835,11 +2843,17 @@
   let ElementSize = zprty.ElementSize;
 }
 
-multiclass sve_int_un_pred_arit_0<bits<3> opc, string asm> {
+multiclass sve_int_un_pred_arit_0<bits<3> opc, string asm,
+                                  SDPatternOperator op> {
   def _B : sve_int_un_pred_arit<0b00, { opc, 0b0 }, asm, ZPR8>;
   def _H : sve_int_un_pred_arit<0b01, { opc, 0b0 }, asm, ZPR16>;
   def _S : sve_int_un_pred_arit<0b10, { opc, 0b0 }, asm, ZPR32>;
   def _D : sve_int_un_pred_arit<0b11, { opc, 0b0 }, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_un_pred_arit_0_h<bits<3> opc, string asm> {
Index: test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; ABS
+;
+
+define <vscale x 16 x i8> @abs_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: abs_i8:
+; CHECK: abs z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i1> %pg,
+                                                               <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @abs_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: abs_i16:
+; CHECK: abs z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @abs_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: abs_i32:
+; CHECK: abs z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i1> %pg,
+                                                               <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @abs_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: abs_i64:
+; CHECK: abs z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i1> %pg,
+                                                               <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; NEG
+;
+
+define <vscale x 16 x i8> @neg_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: neg_i8:
+; CHECK: neg z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i1> %pg,
+                                                               <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @neg_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: neg_i16:
+; CHECK: neg z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @neg_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: neg_i32:
+; CHECK: neg z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i1> %pg,
+                                                               <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @neg_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: neg_i64:
+; CHECK: neg z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i1> %pg,
+                                                               <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
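
Note (not part of the patch): for reference, the _B instantiation of SVE_3_Op_Pat above is expected to expand to roughly the following anonymous pattern, assuming the (merged value, governing predicate, source) operand order declared by AdvSIMD_Merged1VectorArg_Intrinsic; the _H, _S and _D variants follow the same shape with their element types.

def : Pat<(nxv16i8 (int_aarch64_sve_abs nxv16i8:$Op1, nxv16i1:$Op2, nxv16i8:$Op3)),
          (ABS_ZPmZ_B $Op1, $Op2, $Op3)>;

Here $Op1 supplies the inactive lanes of the result, $Op2 is the governing predicate and $Op3 is the source vector, which is what the "abs z0.b, p0/m, z1.b" checks in the new test expect.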