diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -375,6 +375,9 @@
                     (sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
                     // sub(a, select(mask, mul(b, c), splat(0))) -> mls(a, mask, b, c)
                     (sub node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
+def AArch64eor3 : PatFrags<(ops node:$op1, node:$op2, node:$op3),
+                           [(int_aarch64_sve_eor3 node:$op1, node:$op2, node:$op3),
+                            (xor node:$op1, (xor node:$op2, node:$op3))]>;
 
 class fma_patfrags<SDPatternOperator intrinsic, SDPatternOperator sdnode>
     : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
@@ -3504,7 +3507,7 @@
   defm FMLSLT_ZZZ_SHH : sve2_fp_mla_long<0b11, "fmlslt", int_aarch64_sve_fmlslt>;
 
   // SVE2 bitwise ternary operations
-  defm EOR3_ZZZZ  : sve2_int_bitwise_ternary_op<0b000, "eor3",  int_aarch64_sve_eor3>;
+  defm EOR3_ZZZZ  : sve2_int_bitwise_ternary_op<0b000, "eor3",  AArch64eor3>;
   defm BCAX_ZZZZ  : sve2_int_bitwise_ternary_op<0b010, "bcax",  int_aarch64_sve_bcax>;
   defm BSL_ZZZZ   : sve2_int_bitwise_ternary_op<0b001, "bsl",   int_aarch64_sve_bsl, AArch64bsp>;
   defm BSL1N_ZZZZ : sve2_int_bitwise_ternary_op<0b011, "bsl1n", int_aarch64_sve_bsl1n>;
diff --git a/llvm/test/CodeGen/AArch64/sve2-eor3.ll b/llvm/test/CodeGen/AArch64/sve2-eor3.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-eor3.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+sve2 < %s -o - | FileCheck %s --check-prefix=SVE2
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+sve < %s -o - | FileCheck %s --check-prefix=SVE
+
+define <vscale x 2 x i64> @eor3(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; SVE2-LABEL: eor3:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z0.d, z0.d, z1.d, z2.d
+; SVE2-NEXT:    ret
+;
+; SVE-LABEL: eor3:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+  %4 = xor <vscale x 2 x i64> %0, %1
+  %5 = xor <vscale x 4 x i64> %4, %2
+  ret <vscale x 2 x i64> %5
+}