Index: llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td
@@ -749,9 +749,6 @@
                 [IntrNoMem, IntrHasSideEffects]>;
 }
 
-//===----------------------------------------------------------------------===//
-// SVE
-
 def llvm_nxv2i1_ty  : LLVMType<nxv2i1>;
 def llvm_nxv4i1_ty  : LLVMType<nxv4i1>;
 def llvm_nxv8i1_ty  : LLVMType<nxv8i1>;
@@ -764,6 +761,13 @@
 def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
 
 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_Merged1VectorArg_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>,
+                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMMatchType<0>],
+                [IntrNoMem]>;
+
   // This class of intrinsics are not intended to be useful within LLVM IR but
   // are instead here to support some of the more regid parts of the ACLE.
   class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
@@ -771,8 +775,21 @@
       Intrinsic<[OUT], [OUT, llvm_nxv16i1_ty, IN], [IntrNoMem]>;
 }
 
+//===----------------------------------------------------------------------===//
+// SVE
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+
+//
+// Integer arithmetic
+//
+
+def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
+
 //
 // Floating-point comparisons
 //
 def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
+}
Index: llvm/trunk/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/trunk/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -94,8 +94,8 @@
   defm UXTH_ZPmZ : sve_int_un_pred_arit_0_w<0b011, "uxth">;
   defm SXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b100, "sxtw">;
   defm UXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b101, "uxtw">;
-  defm ABS_ZPmZ  : sve_int_un_pred_arit_0<  0b110, "abs">;
-  defm NEG_ZPmZ  : sve_int_un_pred_arit_0<  0b111, "neg">;
+  defm ABS_ZPmZ  : sve_int_un_pred_arit_0<  0b110, "abs", int_aarch64_sve_abs>;
+  defm NEG_ZPmZ  : sve_int_un_pred_arit_0<  0b111, "neg", int_aarch64_sve_neg>;
 
   defm CLS_ZPmZ  : sve_int_un_pred_arit_1<   0b000, "cls">;
   defm CLZ_ZPmZ  : sve_int_un_pred_arit_1<   0b001, "clz">;
Index: llvm/trunk/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/trunk/lib/Target/AArch64/SVEInstrFormats.td
@@ -279,6 +279,14 @@
   defm PTRUES : sve_int_ptrue<0b001, "ptrues">;
 }
 
+//===----------------------------------------------------------------------===//
+// SVE pattern match helpers.
+//===----------------------------------------------------------------------===//
+
+class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                   ValueType vt2, ValueType vt3, Instruction inst>
+: Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
+      (inst $Op1, $Op2, $Op3)>;
+
 //===----------------------------------------------------------------------===//
 // SVE Predicate Misc Group
@@ -2835,11 +2843,17 @@
   let ElementSize = zprty.ElementSize;
 }
 
-multiclass sve_int_un_pred_arit_0<bits<3> opc, string asm> {
+multiclass sve_int_un_pred_arit_0<bits<3> opc, string asm,
+                                  SDPatternOperator op> {
   def _B : sve_int_un_pred_arit<0b00, { opc, 0b0 }, asm, ZPR8>;
   def _H : sve_int_un_pred_arit<0b01, { opc, 0b0 }, asm, ZPR16>;
   def _S : sve_int_un_pred_arit<0b10, { opc, 0b0 }, asm, ZPR32>;
   def _D : sve_int_un_pred_arit<0b11, { opc, 0b0 }, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_un_pred_arit_0_h<bits<3> opc, string asm> {
Index: llvm/trunk/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
+++ llvm/trunk/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; ABS
+;
+
+define <vscale x 16 x i8> @abs_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: abs_i8:
+; CHECK: abs z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i1> %pg,
+                                                               <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @abs_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: abs_i16:
+; CHECK: abs z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @abs_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: abs_i32:
+; CHECK: abs z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i1> %pg,
+                                                               <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @abs_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: abs_i64:
+; CHECK: abs z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i1> %pg,
+                                                               <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; NEG
+;
+
+define <vscale x 16 x i8> @neg_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: neg_i8:
+; CHECK: neg z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i1> %pg,
+                                                               <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @neg_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: neg_i16:
+; CHECK: neg z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @neg_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: neg_i32:
+; CHECK: neg z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i1> %pg,
+                                                               <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @neg_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: neg_i64:
+; CHECK: neg z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i1> %pg,
+                                                               <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
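Note on how the pieces fit together: these are merging ("_m") operations, so the first vector operand of the intrinsic supplies the values of the inactive lanes, the second operand is the governing predicate, and the third is the source vector; that is why the tests expect the result in z0 (the merge value) with z1 as the source. As a rough hand-expanded sketch of what one SVE_3_Op_Pat instantiation above produces (illustration only, not part of the patch; the ABS_ZPmZ_B record name is simply the defm prefix plus the _B suffix):

  // Illustration only: the _B pattern emitted by sve_int_un_pred_arit_0 for
  // ABS_ZPmZ is equivalent to writing this Pat by hand.
  def : Pat<(nxv16i8 (int_aarch64_sve_abs nxv16i8:$Op1, nxv16i1:$Op2, nxv16i8:$Op3)),
            (ABS_ZPmZ_B $Op1, $Op2, $Op3)>;

The _H, _S and _D patterns, and the NEG_ZPmZ ones, expand the same way with the corresponding element and predicate types.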