diff --git a/llvm/docs/GlobalISel/AArch64examples.rst b/llvm/docs/GlobalISel/AArch64examples.rst
new file mode 100644
--- /dev/null
+++ b/llvm/docs/GlobalISel/AArch64examples.rst
@@ -0,0 +1,291 @@
+===============================
+GISel Implementation In AArch64
+===============================
+
+.. contents::
+   :local:
+   :depth: 1
+
+Introduction
+------------
+
+This page illustrates, with examples from the backend sources, how
+GlobalISel is implemented in AArch64.
+
+IRTranslator
+------------
+
+The IRTranslator pass translates an LLVM IR function into a Generic MIR
+function.
+
+Call Lowering
+^^^^^^^^^^^^^
+
+Lowering formal arguments: ``lowerFormalArguments`` lowers a function's
+incoming arguments into virtual registers.
+
+.. code-block:: c++
+
+  bool AArch64CallLowering::lowerFormalArguments(
+      MachineIRBuilder &MIRBuilder, const Function &F,
+      ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
+    MachineFunction &MF = MIRBuilder.getMF();
+    MachineBasicBlock &MBB = MIRBuilder.getMBB();
+    MachineRegisterInfo &MRI = MF.getRegInfo();
+    auto &DL = F.getParent()->getDataLayout();
+
+    SmallVector<ArgInfo, 8> SplitArgs;
+    unsigned i = 0;
+    for (auto &Arg : F.args()) {
+      if (DL.getTypeStoreSize(Arg.getType()).isZero())
+        continue;
+
+      ArgInfo OrigArg{VRegs[i], Arg, i};
+      setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
+
+      if (Arg.hasAttribute(Attribute::SwiftAsync))
+        MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
+
+      splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
+      ++i;
+    }
+
+    if (!MBB.empty())
+      MIRBuilder.setInstr(*MBB.begin());
+
+    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+    CCAssignFn *AssignFn =
+        TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
+
+    AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
+    FormalArgHandler Handler(MIRBuilder, MRI);
+    if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
+                                       F.getCallingConv(), F.isVarArg()))
+      return false;
+
+    AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
+    uint64_t StackOffset = Assigner.StackOffset;
+    if (F.isVarArg()) {
+      auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+      if (!Subtarget.isTargetDarwin()) {
+        // Saving the variadic argument registers is not yet implemented
+        // here for non-Darwin targets, so bail out.
+        return false;
+      }
+
+      // Byte offset of the first variadic argument on the stack.
+      StackOffset =
+          alignTo(Assigner.StackOffset, Subtarget.isTargetILP32() ? 4 : 8);
+
+      auto &MFI = MIRBuilder.getMF().getFrameInfo();
+      FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
+    }
+
+    if (doesCalleeRestoreStack(F.getCallingConv(),
+                               MF.getTarget().Options.GuaranteedTailCallOpt)) {
+      // The callee pops the argument area, which must stay 16-byte aligned.
+      StackOffset = alignTo(StackOffset, 16);
+
+      FuncInfo->setArgumentStackToRestore(StackOffset);
+    }
+
+    FuncInfo->setBytesInStackArgArea(StackOffset);
+
+    auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+    if (Subtarget.hasCustomCallingConv())
+      Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
+
+    handleMustTailForwardedRegisters(MIRBuilder, AssignFn);
+
+    // Move back to the end of the basic block.
+    MIRBuilder.setMBB(MBB);
+
+    return true;
+  }
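+
+Most of the work above is delegated to ``determineAndHandleAssignments``,
+which runs the calling-convention assignment function over each split
+argument and invokes ``FormalArgHandler`` for every assigned location. As a
+rough orientation, here is a minimal sketch (hand-written for this page with
+an invented helper name, not the actual LLVM implementation) of how an
+argument assigned to a physical register is surfaced to the function body:
+
+.. code-block:: c++
+
+  // Sketch: make an incoming physical-register argument usable in generic
+  // MIR by marking the register live-in and copying it into the virtual
+  // register the IRTranslator created for the argument.
+  void assignValueToRegSketch(MachineIRBuilder &MIRBuilder, Register ValVReg,
+                              MCRegister PhysReg) {
+    MIRBuilder.getMRI()->addLiveIn(PhysReg); // live-in at function level
+    MIRBuilder.getMBB().addLiveIn(PhysReg);  // live-in at block level
+    MIRBuilder.buildCopy(ValVReg, PhysReg);  // %vreg = COPY $physreg
+  }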
+
+Lowering return values: ``lowerReturn`` lowers the outgoing return value into
+the specified virtual registers.
+
+.. code-block:: c++
+
+  bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
+                                        const Value *Val,
+                                        ArrayRef<Register> VRegs,
+                                        FunctionLoweringInfo &FLI,
+                                        Register SwiftErrorVReg) const {
+    auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
+    assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
+           "Return value without a vreg");
+
+    bool Success = true;
+    if (!VRegs.empty()) {
+      MachineFunction &MF = MIRBuilder.getMF();
+      const Function &F = MF.getFunction();
+      const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+
+      MachineRegisterInfo &MRI = MF.getRegInfo();
+      const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+      CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
+      auto &DL = F.getParent()->getDataLayout();
+      LLVMContext &Ctx = Val->getType()->getContext();
+
+      SmallVector<EVT, 4> SplitEVTs;
+      ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
+      assert(VRegs.size() == SplitEVTs.size() &&
+             "For each split Type there should be exactly one VReg.");
+
+      SmallVector<ArgInfo, 8> SplitArgs;
+      CallingConv::ID CC = F.getCallingConv();
+
+      for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
+        if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
+          LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
+          return false;
+        }
+
+        Register CurVReg = VRegs[i];
+        ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
+        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
+
+        // An i1 return value is zero-extended to i8 explicitly.
+        if (MRI.getType(CurVReg).getSizeInBits() == 1) {
+          CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
+        } else {
+          // Some return values need extending, as dictated by the calling
+          // convention and the function's return attributes.
+          MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
+          if (EVT(NewVT) != SplitEVTs[i]) {
+            unsigned ExtendOp = TargetOpcode::G_ANYEXT;
+            if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
+                                               Attribute::SExt))
+              ExtendOp = TargetOpcode::G_SEXT;
+            else if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
+                                                    Attribute::ZExt))
+              ExtendOp = TargetOpcode::G_ZEXT;
+
+            LLT NewLLT(NewVT);
+            LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
+            CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
+
+            if (NewVT.isVector()) {
+              if (OldLLT.isVector()) {
+                if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
+                  // Only vectors exactly twice the original size are handled;
+                  // pad them by merging the value with an undef vector.
+                  if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
+                    LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
+                    return false;
+                  }
+                  auto Undef = MIRBuilder.buildUndef({OldLLT});
+                  CurVReg =
+                      MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
+                } else {
+                  // An ordinary vector extend.
+                  CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
+                                .getReg(0);
+                }
+              } else if (NewLLT.getNumElements() == 2) {
+                // Pad a <1 x S> value to <2 x S> with a build_vector; GISel
+                // has no <1 x S> vector types.
+                auto Undef = MIRBuilder.buildUndef({OldLLT});
+                CurVReg =
+                    MIRBuilder
+                        .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
+                        .getReg(0);
+              } else {
+                LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
+                return false;
+              }
+            } else {
+              // A scalar extend, unless the register already has the new type.
+              if (NewLLT != MRI.getType(CurVReg)) {
+                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
+                              .getReg(0);
+              }
+            }
+          }
+        }
+        if (CurVReg != CurArgInfo.Regs[0]) {
+          CurArgInfo.Regs[0] = CurVReg;
+          // Reset the arg flags after modifying CurVReg.
+          setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
+        }
+        splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
+      }
+
+      AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
+                                            /*IsReturn*/ true);
+      OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
+      Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
+                                              MIRBuilder, CC, F.isVarArg());
+    }
+
+    if (SwiftErrorVReg) {
+      MIB.addUse(AArch64::X21, RegState::Implicit);
+      MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
+    }
+
+    MIRBuilder.insertInstr(MIB);
+    return Success;
+  }
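+
+Note the pattern: the return instruction is created with
+``buildInstrNoInsert``, decorated with implicit uses as values are assigned
+to registers, and only inserted at the end. To make the widening logic
+concrete, here is a standalone sketch (a hypothetical helper written for
+this page, not part of the LLVM sources) of the i1 special case:
+
+.. code-block:: c++
+
+  // Sketch: widen a boolean (s1) return value to s8 with an explicit
+  // zero-extension, mirroring the i1 special case in lowerReturn above.
+  Register widenBoolReturn(MachineIRBuilder &MIRBuilder,
+                           MachineRegisterInfo &MRI, Register VReg) {
+    if (MRI.getType(VReg) == LLT::scalar(1))
+      return MIRBuilder.buildZExt(LLT::scalar(8), VReg).getReg(0);
+    return VReg; // already wide enough
+  }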
+
+Lowering function call: ``lowerCall`` lowers an outgoing call into generic
+MIR; its full AArch64 implementation (including tail-call handling) is too
+long to reproduce here.
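+
+As a rough orientation, its shape mirrors the two functions above: split the
+outgoing arguments, emit the call, run the calling convention over the
+arguments, and recover the return values as ``lowerReturn`` does in reverse.
+A minimal sketch (an invented helper, not the actual implementation) of the
+central step, emitting the call instruction itself:
+
+.. code-block:: c++
+
+  // Sketch: build a direct call. The instruction is created without being
+  // inserted, so that the outgoing-argument handler can first add an
+  // implicit use for every register that carries an argument.
+  MachineInstrBuilder buildDirectCall(MachineIRBuilder &MIRBuilder,
+                                      CallLowering::CallLoweringInfo &Info) {
+    auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::BL);
+    MIB.add(Info.Callee); // the callee symbol
+    // ... arguments are assigned here; each register argument does
+    // MIB.addUse(PhysReg, RegState::Implicit);
+    MIRBuilder.insertInstr(MIB);
+    return MIB;
+  }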