diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4201,7 +4201,7 @@
 }
 
 static bool areOperandsOfHigherHalf(SDValue &Op1, SDValue &Op2) {
-  return isOperandOfHigherHalf(Op1) && isOperandOfHigherHalf(Op2);
+  return isOperandOfHigherHalf(Op1) || isOperandOfHigherHalf(Op2);
 }
 
 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
@@ -4548,7 +4548,7 @@
     SDValue Op1 = Op.getOperand(1);
     SDValue Op2 = Op.getOperand(2);
 
-    // If both operands are higher half of two source SIMD & FP registers,
+    // If either operand is the higher half of a source SIMD & FP register,
     // ISel could make use of tablegen patterns to emit PMULL2. So do not
    // legalize i64 to v1i64.
     if (areOperandsOfHigherHalf(Op1, Op2))
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5546,6 +5546,11 @@
 def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
 def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
 
+// FIXME: We may want to canonicalize the pattern so that the V64 operand is always the second argument.
+def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
+                                    V64:$Rm),
+          (PMULLv2i64 V128:$Rn, (v2f64 (DUPv2i64lane (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Rm, dsub), (i64 0))))>;
+
 // DUP from a 64-bit register to a 64-bit register is just a copy
 def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
           (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
diff --git a/llvm/test/CodeGen/AArch64/pmull-ldr-merge.ll b/llvm/test/CodeGen/AArch64/pmull-ldr-merge.ll
--- a/llvm/test/CodeGen/AArch64/pmull-ldr-merge.ll
+++ b/llvm/test/CodeGen/AArch64/pmull-ldr-merge.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+aes -o - %s| FileCheck %s --check-prefixes=CHECK
+; RUN: llc -O3 -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+aes -o - %s| FileCheck %s --check-prefixes=CHECK
 
 ; Tests that scalar i64 arguments of llvm.aarch64.neon.pmull64 are
 ; loaded into SIMD registers, as opposed to being loaded into GPR followed by a mov.
@@ -27,18 +27,18 @@
 define void @test2(ptr %0, i64 %1, i64 %2, <2 x i64> %3) {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x9, v0.d[1]
 ; CHECK-NEXT:    add x8, x0, x1, lsl #4
-; CHECK-NEXT:    ldr d0, [x8, #8]
+; CHECK-NEXT:    ldr x9, [x8, #8]
 ; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    pmull v0.1q, v0.1d, v1.1d
+; CHECK-NEXT:    dup v1.2d, v1.d[0]
+; CHECK-NEXT:    pmull2 v0.1q, v0.2d, v1.2d
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
   %5 = getelementptr inbounds <2 x i64>, ptr %0, i64 %1
   %6 = getelementptr inbounds <2 x i64>, ptr %0, i64 %1, i64 1
   %7 = load i64, ptr %6, align 8
   %8 = extractelement <2 x i64> %3, i64 1
-  %9 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %7, i64 %8)
+  %9 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %8, i64 %7)
   store <16 x i8> %9, ptr %5, align 16
   ret void
 }
@@ -60,4 +60,51 @@
   ret void
 }
 
+define void @test4(ptr %0, ptr %1) {
+; CHECK-LABEL: test4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #56824
+; CHECK-NEXT:    mov w9, #61186
+; CHECK-NEXT:    movk w8, #40522, lsl #16
+; CHECK-NEXT:    movk w9, #29710, lsl #16
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    dup v0.2d, v0.d[0]
+; CHECK-NEXT:    pmull v4.1q, v2.1d, v1.1d
+; CHECK-NEXT:    pmull v1.1q, v3.1d, v1.1d
+; CHECK-NEXT:    pmull2 v2.1q, v2.2d, v0.2d
+; CHECK-NEXT:    pmull2 v0.1q, v3.2d, v0.2d
+; CHECK-NEXT:    ldp q3, q5, [x0]
+; CHECK-NEXT:    eor v2.16b, v4.16b, v2.16b
+; CHECK-NEXT:    eor v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    eor v1.16b, v3.16b, v2.16b
+; CHECK-NEXT:    eor v0.16b, v5.16b, v0.16b
+; CHECK-NEXT:    stp q1, q0, [x1]
+; CHECK-NEXT:    ret
+  %3 = load <2 x i64>, ptr %1
+  %4 = getelementptr inbounds <2 x i64>, ptr %1, i64 1
+  %5 = load <2 x i64>, ptr %4
+  %6 = extractelement <2 x i64> %3, i64 1
+  %7 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %6, i64 2655706616)
+  %8 = extractelement <2 x i64> %5, i64 1
+  %9 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %8, i64 2655706616)
+  %10 = load <2 x i64>, ptr %0
+  %11 = getelementptr inbounds i8, ptr %0, i64 16
+  %12 = load <2 x i64>, ptr %11
+  %13 = extractelement <2 x i64> %3, i64 0
+  %14 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %13, i64 1947135746)
+  %15 = extractelement <2 x i64> %5, i64 0
+  %16 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %15, i64 1947135746)
+  %17 = xor <16 x i8> %14, %7
+  %18 = bitcast <16 x i8> %17 to <2 x i64>
+  %19 = xor <16 x i8> %16, %9
+  %20 = bitcast <16 x i8> %19 to <2 x i64>
+  %21 = xor <2 x i64> %10, %18
+  %22 = xor <2 x i64> %12, %20
+  store <2 x i64> %21, ptr %1
+  store <2 x i64> %22, ptr %4
+  ret void
+}
+
 declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
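
For reference, a minimal IR sketch of the shape the new TableGen pattern targets (the function name is hypothetical and the snippet is not part of the patch); it mirrors test2 above, with the higher-half operand first and the plain scalar second:

  define <16 x i8> @pmull2_high_lane_times_scalar(<2 x i64> %v, i64 %s) {
    ; Lane 1 of %v feeds pmull64 directly; with this change it is expected to
    ; stay in the SIMD register file and select pmull2 after the scalar %s is
    ; broadcast with dup, instead of being moved out through a GPR.
    %hi = extractelement <2 x i64> %v, i64 1
    %r = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %hi, i64 %s)
    ret <16 x i8> %r
  }
  declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)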