Index: lib/Target/AArch64/AArch64InstructionSelector.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -71,6 +71,7 @@
                          MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          MachineRegisterInfo &MRI) const;
+  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
@@ -1696,6 +1697,8 @@
     return selectMergeValues(I, MRI);
   case TargetOpcode::G_UNMERGE_VALUES:
     return selectUnmergeValues(I, MRI);
+  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
+    return selectExtractElt(I, MRI);
   }
 
   return false;
@@ -1781,6 +1784,138 @@
   return true;
 }
 
+static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
+                              const unsigned EltSize) {
+  // Choose a lane copy opcode and subregister based off of the size of the
+  // vector's elements.
+  switch (EltSize) {
+  case 16:
+    CopyOpc = AArch64::CPYi16;
+    ExtractSubReg = AArch64::hsub;
+    break;
+  case 32:
+    CopyOpc = AArch64::CPYi32;
+    ExtractSubReg = AArch64::ssub;
+    break;
+  case 64:
+    CopyOpc = AArch64::CPYi64;
+    ExtractSubReg = AArch64::dsub;
+    break;
+  default:
+    // Unknown size, bail out.
+    LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
+    return false;
+  }
+  return true;
+}
+
+bool AArch64InstructionSelector::selectExtractElt(
+    MachineInstr &I, MachineRegisterInfo &MRI) const {
+  assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
+         "unexpected opcode!");
+  unsigned DstReg = I.getOperand(0).getReg();
+  const LLT NarrowTy = MRI.getType(DstReg);
+  const unsigned SrcReg = I.getOperand(1).getReg();
+  const LLT WideTy = MRI.getType(SrcReg);
+
+  assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
+         "source register size too small!");
+  assert(NarrowTy.isScalar() && "cannot extract vector into vector!");
+
+  // Need the lane index to determine the correct copy opcode.
+  MachineOperand &LaneIdxOp = I.getOperand(2);
+  assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
+
+  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
+    LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
+    return false;
+  }
+
+  // Find the instruction that defines the lane index. There could be any
+  // number of copies between the index operand and the instruction that
+  // actually defines it. Skip them.
+  MachineInstr *LaneDefInst = nullptr;
+  for (LaneDefInst = MRI.getVRegDef(LaneIdxOp.getReg());
+       LaneDefInst && LaneDefInst->isCopy();
+       LaneDefInst = MRI.getVRegDef(LaneDefInst->getOperand(1).getReg())) {
+  }
+
+  // Did we find a def in the first place? If not, bail.
+  if (!LaneDefInst) {
+    LLVM_DEBUG(dbgs() << "Did not find VReg definition for " << LaneIdxOp
+                      << "\n");
+    return false;
+  }
+
+  // TODO: Handle extracts that don't use G_CONSTANT.
+  if (LaneDefInst->getOpcode() != TargetOpcode::G_CONSTANT) {
+    LLVM_DEBUG(dbgs() << "VRegs defined by anything other than G_CONSTANT "
+                         "currently unsupported.\n");
+    return false;
+  }
+
+  unsigned LaneIdx = LaneDefInst->getOperand(1).getCImm()->getLimitedValue();
+  unsigned CopyOpc = 0;
+  unsigned ExtractSubReg = 0;
+  if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits())) {
+    LLVM_DEBUG(
+        dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
+    return false;
+  }
+
+  MachineBasicBlock &MBB = *I.getParent();
+  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
+  const TargetRegisterClass *DstRC =
+      getRegClassForTypeOnBank(NarrowTy, DstRB, RBI, true);
+  if (!DstRC) {
+    LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
+    return false;
+  }
+
+  // The register the lane copy will read from. This starts as the source
+  // register, and is widened to 128 bits below if the vector is unpacked.
+  unsigned InsertReg = SrcReg;
+
+  // Lane copies require 128-bit wide registers. If we're dealing with an
+  // unpacked vector, then we need to move up to that width. Insert an implicit
+  // def and a subregister insert to get us there.
+  if (WideTy.getSizeInBits() != 128) {
+    unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
+    MachineInstr &ImpDefMI =
+        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
+                 ImpDefReg);
+
+    unsigned SubReg = 0;
+    if (!getSubRegForClass(DstRC, TRI, SubReg))
+      return false;
+
+    // Update the register we're going to insert into since we can't directly
+    // copy in this case.
+    InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
+    MachineInstr &InsMI =
+        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::INSERT_SUBREG),
+                 InsertReg)
+             .addUse(ImpDefReg)
+             .addUse(SrcReg)
+             .addImm(SubReg);
+    constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
+    constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
+  }
+
+  MachineInstr &LaneCopyMI =
+      *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), DstReg)
+           .addUse(InsertReg)
+           .addImm(LaneIdx);
+
+  constrainSelectedInstRegOperands(LaneCopyMI, TII, TRI, RBI);
+
+  // Make sure that we actually constrain the initial copy.
+  RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
+
+  I.eraseFromParent();
+  return true;
+}
+
 bool AArch64InstructionSelector::selectUnmergeValues(
     MachineInstr &I, MachineRegisterInfo &MRI) const {
   assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
@@ -1817,24 +1952,8 @@
   // vector's elements.
   unsigned CopyOpc = 0;
   unsigned ExtractSubReg = 0;
-  switch (NarrowTy.getSizeInBits()) {
-  case 16:
-    CopyOpc = AArch64::CPYi16;
-    ExtractSubReg = AArch64::hsub;
-    break;
-  case 32:
-    CopyOpc = AArch64::CPYi32;
-    ExtractSubReg = AArch64::ssub;
-    break;
-  case 64:
-    CopyOpc = AArch64::CPYi64;
-    ExtractSubReg = AArch64::dsub;
-    break;
-  default:
-    // Unknown size, bail out.
-    LLVM_DEBUG(dbgs() << "NarrowTy had unsupported size.\n");
+  if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
     return false;
-  }
 
   // Set up for the lane copies.
   MachineBasicBlock &MBB = *I.getParent();
Index: lib/Target/AArch64/AArch64LegalizerInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -438,7 +438,8 @@
       .minScalar(2, s64)
       .legalIf([=](const LegalityQuery &Query) {
         const LLT &VecTy = Query.Types[1];
-        return VecTy == v4s32 || VecTy == v2s64;
+        return VecTy == v2s16 || VecTy == v4s16 || VecTy == v4s32 ||
+               VecTy == v2s64 || VecTy == v2s32;
       });
 
   getActionDefinitionsBuilder(G_BUILD_VECTOR)
Index: lib/Target/AArch64/AArch64RegisterBankInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -689,6 +689,12 @@
     }
     break;
   }
+  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
+    // Only ever extract into FPRs.
+    for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
+         Idx < NumOperands; ++Idx)
+      OpRegBankIdx[Idx] = PMI_FirstFPR;
+    break;
   case TargetOpcode::G_BUILD_VECTOR:
     // If the first source operand belongs to a FPR register bank, then make
Index: test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt.mir
@@ -0,0 +1,94 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-unknown-unknown -verify-machineinstrs -O0 -run-pass=instruction-select %s -o - | FileCheck %s
+...
+---
+name: v2s32_fpr
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+body: |
+  bb.0:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: v2s32_fpr
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
+    ; CHECK: [[CPYi32_:%[0-9]+]]:fpr32 = CPYi32 [[INSERT_SUBREG]], 1
+    ; CHECK: $s0 = COPY [[CPYi32_]]
+    ; CHECK: RET_ReallyLR implicit $s0
+    %0:fpr(<2 x s32>) = COPY $d0
+    %2:gpr(s64) = G_CONSTANT i64 1
+    %3:fpr(s64) = COPY %2(s64)
+    %1:fpr(s32) = G_EXTRACT_VECTOR_ELT %0(<2 x s32>), %3(s64)
+    $s0 = COPY %1(s32)
+    RET_ReallyLR implicit $s0
+
+...
+---
+name: v2s64_fpr
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v2s64_fpr
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[CPYi64_:%[0-9]+]]:fpr64 = CPYi64 [[COPY]], 2
+    ; CHECK: $d0 = COPY [[CPYi64_]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<2 x s64>) = COPY $q0
+    %2:gpr(s64) = G_CONSTANT i64 2
+    %3:fpr(s64) = COPY %2(s64)
+    %1:fpr(s64) = G_EXTRACT_VECTOR_ELT %0(<2 x s64>), %3(s64)
+    $d0 = COPY %1(s64)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: v4s16_fpr
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+body: |
+  bb.0:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: v4s16_fpr
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.hsub
+    ; CHECK: [[CPYi16_:%[0-9]+]]:fpr16 = CPYi16 [[INSERT_SUBREG]], 1
+    ; CHECK: $h0 = COPY [[CPYi16_]]
+    ; CHECK: RET_ReallyLR implicit $h0
+    %0:fpr(<4 x s16>) = COPY $d0
+    %2:gpr(s64) = G_CONSTANT i64 1
+    %3:fpr(s64) = COPY %2(s64)
+    %1:fpr(s16) = G_EXTRACT_VECTOR_ELT %0(<4 x s16>), %3(s64)
+    $h0 = COPY %1(s16)
+    RET_ReallyLR implicit $h0
+
+...
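
Note: selectExtractElt only succeeds when the lane index can be traced back, through any number of copies, to a G_CONSTANT; any other index definition hits the TODO above and the selector returns false. As a rough illustration of the still-unsupported shape (a hypothetical gMIR sketch, not taken from the added tests; the G_AND is just a stand-in for any non-constant index computation):

    %0:fpr(<2 x s64>) = COPY $q0
    %1:gpr(s64) = COPY $x0
    %2:gpr(s64) = G_AND %1, %1            ; lane index is not defined by a G_CONSTANT
    %3:fpr(s64) = COPY %2(s64)
    %4:fpr(s64) = G_EXTRACT_VECTOR_ELT %0(<2 x s64>), %3(s64)

Here the copy walk stops at the G_AND, the G_CONSTANT check fails with the "VRegs defined by anything other than G_CONSTANT" debug message, and the G_EXTRACT_VECTOR_ELT is left unselected.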