diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -152,6 +152,7 @@
   bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectTLSGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectReduction(MachineInstr &I, MachineRegisterInfo &MRI) const;

   unsigned emitConstantPoolEntry(const Constant *CPVal,
                                  MachineFunction &MF) const;
@@ -2960,11 +2961,52 @@
     return selectConcatVectors(I, MRI);
   case TargetOpcode::G_JUMP_TABLE:
     return selectJumpTable(I, MRI);
+  case TargetOpcode::G_VECREDUCE_FADD:
+  case TargetOpcode::G_VECREDUCE_ADD:
+    return selectReduction(I, MRI);
   }

   return false;
 }

+bool AArch64InstructionSelector::selectReduction(
+    MachineInstr &I, MachineRegisterInfo &MRI) const {
+  Register VecReg = I.getOperand(1).getReg();
+  LLT VecTy = MRI.getType(VecReg);
+  if (I.getOpcode() == TargetOpcode::G_VECREDUCE_ADD) {
+    unsigned Opc = 0;
+    if (VecTy == LLT::vector(16, 8))
+      Opc = AArch64::ADDVv16i8v;
+    else if (VecTy == LLT::vector(8, 16))
+      Opc = AArch64::ADDVv8i16v;
+    else if (VecTy == LLT::vector(4, 32))
+      Opc = AArch64::ADDVv4i32v;
+    else if (VecTy == LLT::vector(2, 64))
+      Opc = AArch64::ADDPv2i64p;
+    else {
+      LLVM_DEBUG(dbgs() << "Unhandled type for add reduction");
+      return false;
+    }
+    I.setDesc(TII.get(Opc));
+    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+  }
+
+  if (I.getOpcode() == TargetOpcode::G_VECREDUCE_FADD) {
+    unsigned Opc = 0;
+    if (VecTy == LLT::vector(2, 32))
+      Opc = AArch64::FADDPv2i32p;
+    else if (VecTy == LLT::vector(2, 64))
+      Opc = AArch64::FADDPv2i64p;
+    else {
+      LLVM_DEBUG(dbgs() << "Unhandled type for fadd reduction");
+      return false;
+    }
+    I.setDesc(TII.get(Opc));
+    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+  }
+  return false;
+}
+
 bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
                                             MachineRegisterInfo &MRI) const {
   assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir
@@ -0,0 +1,114 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs -global-isel-abort=1 %s -o - | FileCheck %s
+---
+name:            add_B
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: add_B
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load 16)
+    ; CHECK: [[ADDVv16i8v:%[0-9]+]]:fpr8 = ADDVv16i8v [[LDRQui]]
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv16i8v]], %subreg.bsub
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
+    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(p0) = COPY $x0
+    %1:fpr(<16 x s8>) = G_LOAD %0(p0) :: (load 16)
+    %2:fpr(s8) = G_VECREDUCE_ADD %1(<16 x s8>)
+    %4:gpr(s8) = COPY %2(s8)
+    %3:gpr(s32) = G_ANYEXT %4(s8)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            add_H
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: add_H
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load 16)
+    ; CHECK: [[ADDVv8i16v:%[0-9]+]]:fpr16 = ADDVv8i16v [[LDRQui]]
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv8i16v]], %subreg.hsub
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
+    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(p0) = COPY $x0
+    %1:fpr(<8 x s16>) = G_LOAD %0(p0) :: (load 16)
+    %2:fpr(s16) = G_VECREDUCE_ADD %1(<8 x s16>)
+    %4:gpr(s16) = COPY %2(s16)
+    %3:gpr(s32) = G_ANYEXT %4(s16)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            add_S
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: add_S
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load 16)
+    ; CHECK: [[ADDVv4i32v:%[0-9]+]]:fpr32 = ADDVv4i32v [[LDRQui]]
+    ; CHECK: $w0 = COPY [[ADDVv4i32v]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(p0) = COPY $x0
+    %1:fpr(<4 x s32>) = G_LOAD %0(p0) :: (load 16)
+    %2:fpr(s32) = G_VECREDUCE_ADD %1(<4 x s32>)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            add_D
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: add_D
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load 16)
+    ; CHECK: [[ADDPv2i64p:%[0-9]+]]:fpr64 = ADDPv2i64p [[LDRQui]]
+    ; CHECK: $x0 = COPY [[ADDPv2i64p]]
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0:gpr(p0) = COPY $x0
+    %1:fpr(<2 x s64>) = G_LOAD %0(p0) :: (load 16)
+    %2:fpr(s64) = G_VECREDUCE_ADD %1(<2 x s64>)
+    $x0 = COPY %2(s64)
+    RET_ReallyLR implicit $x0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-fadd.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-fadd.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-fadd.mir
@@ -0,0 +1,44 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs -global-isel-abort=1 %s -o - | FileCheck %s
+---
+name:            fadd_v2s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: fadd_v2s32
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[FADDPv2i32p:%[0-9]+]]:fpr32 = FADDPv2i32p [[COPY]]
+    ; CHECK: $w0 = COPY [[FADDPv2i32p]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:fpr(<2 x s32>) = COPY $d0
+    %1:fpr(s32) = G_VECREDUCE_FADD %0(<2 x s32>)
+    $w0 = COPY %1(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            fadd_v2s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: fadd_v2s64
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[FADDPv2i64p:%[0-9]+]]:fpr64 = FADDPv2i64p [[COPY]]
+    ; CHECK: $x0 = COPY [[FADDPv2i64p]]
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0:fpr(<2 x s64>) = COPY $q0
+    %2:fpr(s64) = G_VECREDUCE_FADD %0(<2 x s64>)
+    $x0 = COPY %2(s64)
+    RET_ReallyLR implicit $x0
+
+...
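
For reference only (not part of the patch): a minimal IR-level sketch of the kind of input that should exercise the new selection path end to end. It assumes the non-experimental llvm.vector.reduce.* intrinsic names and a plain -global-isel invocation of llc; the exact G_VECREDUCE lowering of the fadd start value depends on the IR translator and legalizer, so treat the comments as an approximation.

; Hypothetical usage sketch. Something like:
;   llc -mtriple=aarch64 -global-isel -global-isel-abort=1 -o - reduce.ll
define i32 @add_v4i32(<4 x i32> %v) {
  ; Translates to G_VECREDUCE_ADD on <4 x s32>, which selectReduction
  ; now maps to ADDVv4i32v.
  %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
  ret i32 %r
}

define float @fadd_v2f32(<2 x float> %v) {
  ; With reassoc, the reduction of the vector operand can be expressed as
  ; G_VECREDUCE_FADD on <2 x s32> (start value handled separately), which
  ; selectReduction maps to FADDPv2i32p.
  %r = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.0, <2 x float> %v)
  ret float %r
}

declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)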