diff --git a/llvm/include/llvm/CodeGen/CodeGenCommonISel.h b/llvm/include/llvm/CodeGen/CodeGenCommonISel.h
new file mode
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/CodeGenCommonISel.h
@@ -0,0 +1,238 @@
+//===- CodeGenCommonISel.h - Common code between ISels ---------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares common utilities that are shared between SelectionDAG and
+// GlobalISel frameworks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CODEGENCOMMONISEL_H
+#define LLVM_CODEGEN_CODEGENCOMMONISEL_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include <cassert>
+namespace llvm {
+
+class BasicBlock;
+class MachineBasicBlock;
+
+/// A class which encapsulates all of the information needed to generate a
+/// stack protector check and signals to isel via its state being initialized
+/// that a stack protector needs to be generated.
+///
+/// *NOTE* The following is a high level documentation of SelectionDAG Stack
+/// Protector Generation. The reason that it is placed here is for a lack of
+/// other good places to stick it.
+///
+/// High Level Overview of SelectionDAG Stack Protector Generation:
+///
+/// Previously, generation of stack protectors was done exclusively in the
+/// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
+/// splitting basic blocks at the IR level to create the success/failure basic
+/// blocks in the tail of the basic block in question. As a result of this,
+/// calls that would have qualified for the sibling call optimization were no
+/// longer eligible for optimization since said calls were no longer right in
+/// the "tail position" (i.e. the immediate predecessor of a ReturnInst
+/// instruction).
+///
+/// Then it was noticed that since the sibling call optimization causes the
+/// callee to reuse the caller's stack, if we could delay the generation of
+/// the stack protector check until later in CodeGen after the sibling call
+/// decision was made, we get both the tail call optimization and the stack
+/// protector check!
+///
+/// A few goals in solving this problem were:
+///
+///   1. Preserve the architecture independence of stack protector generation.
+///
+///   2. Preserve the normal IR level stack protector check for platforms like
+///      OpenBSD for which we support platform-specific stack protector
+///      generation.
+///
+/// The main problem that guided the present solution is that one can not
+/// solve this problem in an architecture independent manner at the IR level
+/// only. This is because:
+///
+///   1. The decision on whether or not to perform a sibling call on certain
+///      platforms (for instance i386) requires lower level information
+///      related to available registers that can not be known at the IR level.
+///
+///   2. Even if the previous point were not true, the decision on whether to
+///      perform a tail call is done in LowerCallTo in SelectionDAG which
+///      occurs after the Stack Protector Pass.
As a result, one would need to
+///      put the relevant callinst into the stack protector check success
+///      basic block (where the return inst is placed) and then move it back
+///      later at SelectionDAG/MI time before the stack protector check if the
+///      tail call optimization failed. The MI level option was nixed
+///      immediately since it would require platform-specific pattern
+///      matching. The SelectionDAG level option was nixed because
+///      SelectionDAG only processes one IR level basic block at a time
+///      implying one could not create a DAG Combine to move the callinst.
+///
+/// To get around this problem a few things were realized:
+///
+///   1. While one can not handle multiple IR level basic blocks at the
+///      SelectionDAG Level, one can generate multiple machine basic blocks
+///      for one IR level basic block. This is how we handle bit tests and
+///      switches.
+///
+///   2. At the MI level, tail calls are represented via a special return
+///      MIInst called "tcreturn". Thus if we know the basic block in which we
+///      wish to insert the stack protector check, we get the correct behavior
+///      by always inserting the stack protector check right before the return
+///      statement. This is a "magical transformation" since no matter where
+///      the stack protector check intrinsic is, we always insert the stack
+///      protector check code at the end of the BB.
+///
+/// Given the aforementioned constraints, the following solution was devised:
+///
+///   1. On platforms that do not support SelectionDAG stack protector check
+///      generation, allow for the normal IR level stack protector check
+///      generation to continue.
+///
+///   2. On platforms that do support SelectionDAG stack protector check
+///      generation:
+///
+///     a. Use the IR level stack protector pass to decide if a stack
+///        protector is required/which BB we insert the stack protector check
+///        in by reusing the logic already therein. If we wish to generate a
+///        stack protector check in a basic block, we place a special IR
+///        intrinsic called llvm.stackprotectorcheck right before the BB's
+///        returninst or if there is a callinst that could potentially be
+///        sibling call optimized, before the call inst.
+///
+///     b. Then when a BB with said intrinsic is processed, we codegen the BB
+///        normally via SelectBasicBlock. In said process, when we visit the
+///        stack protector check, we do not actually emit anything into the
+///        BB. Instead, we just initialize the stack protector descriptor
+///        class (which involves stashing information/creating the success
+///        mbb and the failure mbb if we have not created one for this
+///        function yet) and export the guard variable that we are going to
+///        compare.
+///
+///     c. After we finish selecting the basic block, in FinishBasicBlock if
+///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
+///        initialized, we produce the validation code with one of these
+///        techniques:
+///          1) with a call to a guard check function
+///          2) with inlined instrumentation
+///
+///        1) We insert a call to the check function before the terminator.
+///
+///        2) We first find a splice point in the parent basic block
+///        before the terminator and then splice the terminator of said basic
+///        block into the success basic block. Then we code-gen a new tail for
+///        the parent basic block consisting of the two loads, the comparison,
+///        and finally two branches to the success/failure basic blocks.
We
+///        conclude by code-gening the failure basic block if we have not
+///        code-gened it already (all stack protector checks we generate in
+///        the same function use the same failure basic block).
+class StackProtectorDescriptor {
+public:
+  StackProtectorDescriptor() = default;
+
+  /// Returns true if all fields of the stack protector descriptor are
+  /// initialized implying that we should/are ready to emit a stack protector.
+  bool shouldEmitStackProtector() const {
+    return ParentMBB && SuccessMBB && FailureMBB;
+  }
+
+  bool shouldEmitFunctionBasedCheckStackProtector() const {
+    return ParentMBB && !SuccessMBB && !FailureMBB;
+  }
+
+  /// Initialize the stack protector descriptor structure for a new basic
+  /// block.
+  void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
+                  bool FunctionBasedInstrumentation) {
+    // Make sure we are not initialized yet.
+    assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
+                                          "already initialized!");
+    ParentMBB = MBB;
+    if (!FunctionBasedInstrumentation) {
+      SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
+      FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
+    }
+  }
+
+  /// Reset state that changes when we handle different basic blocks.
+  ///
+  /// This currently includes:
+  ///
+  /// 1. The specific basic block we are generating a
+  /// stack protector for (ParentMBB).
+  ///
+  /// 2. The successor machine basic block that will contain the tail of
+  /// parent mbb after we create the stack protector check (SuccessMBB). This
+  /// BB is visited only on stack protector check success.
+  void resetPerBBState() {
+    ParentMBB = nullptr;
+    SuccessMBB = nullptr;
+  }
+
+  /// Reset state that only changes when we switch functions.
+  ///
+  /// This currently includes:
+  ///
+  /// 1. FailureMBB since we reuse the failure code path for all stack
+  /// protector checks created in an individual function.
+  ///
+  /// 2. The guard variable since the guard variable we are checking against is
+  /// always the same.
+  void resetPerFunctionState() { FailureMBB = nullptr; }
+
+  MachineBasicBlock *getParentMBB() { return ParentMBB; }
+  MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
+  MachineBasicBlock *getFailureMBB() { return FailureMBB; }
+
+private:
+  /// The basic block for which we are generating the stack protector.
+  ///
+  /// As a result of stack protector generation, we will splice the
+  /// terminators of this basic block into the successor mbb SuccessMBB and
+  /// replace it with a compare/branch to the successor mbbs
+  /// SuccessMBB/FailureMBB depending on whether or not the stack protector
+  /// was violated.
+  MachineBasicBlock *ParentMBB = nullptr;
+
+  /// A basic block visited on stack protector check success that contains the
+  /// terminators of ParentMBB.
+  MachineBasicBlock *SuccessMBB = nullptr;
+
+  /// A basic block visited on stack protector check failure that will
+  /// contain a call to __stack_chk_fail().
+  MachineBasicBlock *FailureMBB = nullptr;
+
+  /// Add a successor machine basic block to ParentMBB. If the successor mbb
+  /// has not been created yet (i.e. if SuccMBB = 0), then the machine basic
+  /// block will be created. Assign a large weight if IsLikely is true.
+  MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
+                                     MachineBasicBlock *ParentMBB,
+                                     bool IsLikely,
+                                     MachineBasicBlock *SuccMBB = nullptr);
+};
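+
+// A minimal usage sketch (illustrative only, not part of the interface):
+// `SP` is assumed to be the StackProtector analysis, `SPD` a
+// StackProtectorDescriptor owned by the selector, and `emitParentCheck` /
+// `emitFailureCall` are hypothetical stand-ins for the framework-specific
+// emission routines.
+//
+// \code
+//   if (SP.shouldEmitSDCheck(BB))
+//     SPD.initialize(&BB, &MBB, /*FunctionBasedInstrumentation=*/false);
+//   if (SPD.shouldEmitStackProtector()) {
+//     // Splice the tail of ParentMBB into SuccessMBB, then emit the guard
+//     // load, the comparison and the conditional branch into ParentMBB.
+//     emitParentCheck(SPD, SPD.getParentMBB());
+//     if (SPD.getFailureMBB()->empty())
+//       emitFailureCall(SPD, SPD.getFailureMBB()); // calls __stack_chk_fail
+//     SPD.resetPerBBState();
+//   }
+// \endcode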
+
+/// Find the split point at which to splice the end of BB into its success stack
+/// protector check machine basic block.
+///
+/// On many platforms, due to ABI constraints, terminators, even before register
+/// allocation, use physical registers. This creates an issue for us since
+/// physical registers at this point can not travel across basic
+/// blocks. Luckily, SelectionDAG always moves physical registers into vregs
+/// when they enter functions and moves them through a sequence of copies back
+/// into the physical registers right before the terminator, creating a
+/// ``Terminator Sequence''. This function searches for the beginning of the
+/// terminator sequence so that we can ensure that we splice off not just the
+/// terminator, but additionally the copies that move the vregs into the
+/// physical registers.
+MachineBasicBlock::iterator
+FindSplitPointForStackProtector(MachineBasicBlock *BB);
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_CODEGENCOMMONISEL_H
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -20,6 +20,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/CodeGenCommonISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -586,6 +587,8 @@
   /// stop translating such blocks early.
   bool HasTailCall = false;
 
+  StackProtectorDescriptor SPDescriptor;
+
   /// Switch analysis and optimization.
   class GISelSwitchLowering : public SwitchCG::SwitchLowering {
   public:
@@ -614,8 +617,34 @@
   // * Clear the different maps.
   void finalizeFunction();
 
-  // Handle emitting jump tables for each basic block.
-  void finalizeBasicBlock();
+  // Processing steps done per block, e.g. emitting jump tables, stack
+  // protector checks, etc. Returns true if no errors occurred, false if there
+  // was a problem that caused an abort.
+  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);
+
+  /// Codegen a new tail for a stack protector check ParentMBB which has had
+  /// its tail spliced into a stack protector check success bb.
+  ///
+  /// For a high level explanation of how this fits into the stack protector
+  /// generation see the comment on the declaration of class
+  /// StackProtectorDescriptor.
+  ///
+  /// \return true if there were no problems.
+  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
+                              MachineBasicBlock *ParentBB);
+
+  /// Codegen the failure basic block for a stack protector check.
+  ///
+  /// A failure stack protector machine basic block consists simply of a call
+  /// to __stack_chk_fail().
+  ///
+  /// For a high level explanation of how this fits into the stack protector
+  /// generation see the comment on the declaration of class
+  /// StackProtectorDescriptor.
+  ///
+  /// \return true if there were no problems.
+  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
+                               MachineBasicBlock *FailureBB);
 
   /// Get the VRegs that represent \p Val.
   /// Non-aggregate types have just one corresponding VReg and the list can be
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -14,6 +14,7 @@
   CFGuardLongjmp.cpp
   CFIInstrInserter.cpp
   CodeGen.cpp
+  CodeGenCommonISel.cpp
   CodeGenPassBuilder.cpp
   CodeGenPrepare.cpp
   CommandFlags.cpp
diff --git a/llvm/lib/CodeGen/CodeGenCommonISel.cpp b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
new file mode
--- /dev/null
+++ b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
@@ -0,0 +1,121 @@
+//===-- CodeGenCommonISel.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common utilities that are shared between SelectionDAG and
+// GlobalISel frameworks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/CodeGenCommonISel.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+using namespace llvm;
+
+/// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if SuccMBB
+/// is 0.
+MachineBasicBlock *
+StackProtectorDescriptor::AddSuccessorMBB(
+    const BasicBlock *BB, MachineBasicBlock *ParentMBB, bool IsLikely,
+    MachineBasicBlock *SuccMBB) {
+  // If SuccBB has not been created yet, create it.
+  if (!SuccMBB) {
+    MachineFunction *MF = ParentMBB->getParent();
+    MachineFunction::iterator BBI(ParentMBB);
+    SuccMBB = MF->CreateMachineBasicBlock(BB);
+    MF->insert(++BBI, SuccMBB);
+  }
+  // Add it as a successor of ParentMBB.
+  ParentMBB->addSuccessor(
+      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
+  return SuccMBB;
+}
+
+/// Given that the input MI is before a partial terminator sequence TSeq, return
+/// true if MI + TSeq is also a partial terminator sequence.
+///
+/// A Terminator sequence is a sequence of MachineInstrs which at this point in
+/// lowering copy vregs into physical registers, which are then passed into
+/// terminator instructions so we can satisfy ABI constraints. A partial
+/// terminator sequence is an improper subset of a terminator sequence (i.e. it
+/// may be the whole terminator sequence).
+static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
+  // If we do not have a copy or an implicit def, we return true if and only if
+  // MI is a debug value.
+  if (!MI.isCopy() && !MI.isImplicitDef()) {
+    // Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to the
+    // physical registers if there is debug info associated with the terminator
+    // of our mbb. We want to include said debug info in our terminator
+    // sequence, so we return true in that case.
+    if (MI.isDebugValue())
+      return true;
+
+    // For GlobalISel, we may have extension instructions for arguments within
+    // copy sequences. Allow these.
+    switch (MI.getOpcode()) {
+    case TargetOpcode::G_ANYEXT:
+    case TargetOpcode::G_ZEXT:
+    case TargetOpcode::G_SEXT:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  // We have left the terminator sequence if we are not doing one of the
+  // following:
+  //
+  // 1. Copying a vreg into a physical register.
+  // 2. Copying a vreg into a vreg.
+  // 3. Defining a register via an implicit def.
+
+  // OPI should always be a register definition...
+  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
+  if (!OPI->isReg() || !OPI->isDef())
+    return false;
+
+  // Defining any register via an implicit def is always ok.
+  if (MI.isImplicitDef())
+    return true;
+
+  // Grab the copy source...
+  MachineInstr::const_mop_iterator OPI2 = OPI;
+  ++OPI2;
+  assert(OPI2 != MI.operands_end() &&
+         "Should have a copy implying we should have 2 arguments.");
+
+  // Make sure that the copy dest is not a vreg when the copy source is a
+  // physical register.
+  if (!OPI2->isReg() || (!Register::isPhysicalRegister(OPI->getReg()) &&
+                         Register::isPhysicalRegister(OPI2->getReg())))
+    return false;
+
+  return true;
+}
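+
+// For illustration only (AArch64-flavoured pseudo-MIR, not part of this
+// patch): a typical terminator sequence that the predicate above walks.
+// Everything from the first COPY onwards must be spliced off together with
+// the terminator:
+//
+//   %sum:gpr64 = ...          ; ordinary code, not part of the sequence
+//   $x0 = COPY %sum           ; vreg -> physreg copy, in the sequence
+//   DBG_VALUE $x0, ...        ; debug info for the terminator, kept
+//   RET_ReallyLR implicit $x0 ; the terminator itself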
+
+MachineBasicBlock::iterator
+llvm::FindSplitPointForStackProtector(MachineBasicBlock *BB) {
+  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
+  // If the block consists solely of terminators, there is nothing to split.
+  if (SplitPoint == BB->begin())
+    return SplitPoint;
+
+  MachineBasicBlock::iterator Start = BB->begin();
+  MachineBasicBlock::iterator Previous = SplitPoint;
+  --Previous;
+
+  while (MIIsInTerminatorSequence(*Previous)) {
+    SplitPoint = Previous;
+    if (Previous == Start)
+      break;
+    --Previous;
+  }
+
+  return SplitPoint;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -23,6 +23,7 @@
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -32,6 +33,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/StackProtector.h"
 #include "llvm/CodeGen/SwitchLoweringUtils.h"
 #include "llvm/CodeGen/TargetFrameLowering.h"
@@ -2923,7 +2925,8 @@
   return true;
 }
 
-void IRTranslator::finalizeBasicBlock() {
+bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
+                                      MachineBasicBlock &MBB) {
   for (auto &BTB : SL->BitTestCases) {
     // Emit header first, if it wasn't already emitted.
     if (!BTB.Emitted)
@@ -2987,6 +2990,179 @@
   for (auto &SwCase : SL->SwitchCases)
     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
   SL->SwitchCases.clear();
+
+  // Check if we need to generate stack-protector guard checks.
+  StackProtector &SP = getAnalysis<StackProtector>();
+  if (SP.shouldEmitSDCheck(BB)) {
+    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+    bool FunctionBasedInstrumentation =
+        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
+    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
+  }
+  // Handle stack protector.
+  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
+    LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
+    return false;
+  } else if (SPDescriptor.shouldEmitStackProtector()) {
+    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
+    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
+
+    // Find the split point to split the parent mbb. At the same time copy all
+    // physical registers used in the tail of parent mbb into virtual registers
+    // before the split point and back into physical registers after the split
+    // point.
This prevents us from needing to deal with Live-ins and many other
+    // register allocation issues caused by us splitting the parent mbb. The
+    // register allocator will clean up said virtual copies later on.
+    MachineBasicBlock::iterator SplitPoint =
+        FindSplitPointForStackProtector(ParentMBB);
+
+    // Splice the terminator of ParentMBB into SuccessMBB.
+    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
+                       ParentMBB->end());
+
+    // Add the guard comparison and the conditional branches to the parent BB.
+    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
+      return false;
+
+    // Codegen the failure MBB if we have not codegened it yet.
+    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
+    if (FailureMBB->empty()) {
+      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
+        return false;
+    }
+
+    // Clear the per-BB state.
+    SPDescriptor.resetPerBBState();
+  }
+  return true;
+}
+
+bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
+                                          MachineBasicBlock *ParentBB) {
+  MachineIRBuilder MIRBuilder(*ParentBB, ParentBB->end());
+  MIRBuilder.setDebugLoc(CurBuilder->getDebugLoc());
+  // First create the loads to the guard/stack slot for the comparison.
+  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
+  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+  LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
+
+  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
+  int FI = MFI.getStackProtectorIndex();
+
+  Register Guard;
+  Register StackSlotPtr = MIRBuilder.buildFrameIndex(PtrTy, FI).getReg(0);
+  const Module &M = *ParentBB->getParent()->getFunction().getParent();
+  Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
+
+  // Generate code to load the content of the guard slot.
+  Register GuardVal =
+      MIRBuilder
+          .buildLoad(
+              PtrMemTy, StackSlotPtr,
+              *MF->getMachineMemOperand(
+                  MachinePointerInfo::getFixedStack(*MF, FI),
+                  MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile,
+                  PtrTy.getSizeInBytes(), Align))
+          .getReg(0);
+
+  if (TLI.useStackGuardXorFP()) {
+    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
+    return false;
+  }
+
+  // Retrieve the guard check function; nullptr if instrumentation is inlined.
+  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
+    // This path is currently untestable on GlobalISel, since the only platform
+    // that needs this seems to be Windows, and we fall back on that currently.
+    // The code still lives here in case that changes.
+    return false;
+#if 0
+    // The target provides a guard check function to validate the guard value.
+    // Generate a call to that function with the content of the guard slot as
+    // argument.
+    FunctionType *FnTy = GuardCheckFn->getFunctionType();
+    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
+    ISD::ArgFlagsTy Flags;
+    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
+      Flags.setInReg();
+    CallLowering::ArgInfo GuardArgInfo(
+        {GuardVal, FnTy->getParamType(0), {Flags}});
+
+    CallLowering::CallLoweringInfo Info;
+    Info.OrigArgs.push_back(GuardArgInfo);
+    Info.CallConv = GuardCheckFn->getCallingConv();
+    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
+    Info.OrigRet = {Register(), FnTy->getReturnType()};
+    if (!CLI->lowerCall(MIRBuilder, Info)) {
+      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
+      return false;
+    }
+    return true;
+#endif
+  }
+
+  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
+  // Otherwise, emit a volatile load to retrieve the stack guard value.
+  if (TLI.useLoadStackGuardNode()) {
+    Guard =
+        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
+    getStackGuard(Guard, MIRBuilder);
+  } else {
+    // TODO: test using the Android subtarget when we support
+    // @llvm.thread.pointer.
+    const Value *IRGuard = TLI.getSDagStackGuard(M);
+    Register GuardPtr = getOrCreateVReg(*IRGuard);
+
+    Guard = MIRBuilder
+                .buildLoad(PtrMemTy, GuardPtr,
+                           *MF->getMachineMemOperand(
+                               MachinePointerInfo::getFixedStack(*MF, FI),
+                               MachineMemOperand::MOLoad |
+                                   MachineMemOperand::MOVolatile,
+                               PtrTy.getSizeInBytes(), Align))
+                .getReg(0);
+  }
+
+  // Perform the comparison.
+  auto Cmp =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
+  // If the guard value and the stack slot contents do not match, branch to
+  // the failure MBB.
+  MIRBuilder.buildBrCond(Cmp, *SPD.getFailureMBB());
+  // Otherwise branch to the success MBB.
+  MIRBuilder.buildBr(*SPD.getSuccessMBB());
+  return true;
+}
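+
+// After emitSPDescriptorParent, the parent block ends roughly like this
+// (pseudo-MIR, for illustration only; the names are made up):
+//
+//   %guard:_(s64) = ... guard value (LOAD_STACK_GUARD or volatile load) ...
+//   %slotval:_(s64) = G_LOAD %stackslot ; volatile reload of the stack slot
+//   %cmp:_(s1) = G_ICMP intpred(ne), %guard, %slotval
+//   G_BRCOND %cmp(s1), %bb.failure
+//   G_BR %bb.success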
+
+bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
+                                           MachineBasicBlock *FailureBB) {
+  MachineIRBuilder MIRBuilder(*FailureBB, FailureBB->end());
+  MIRBuilder.setDebugLoc(CurBuilder->getDebugLoc());
+  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+
+  const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
+  const char *Name = TLI.getLibcallName(Libcall);
+
+  CallLowering::CallLoweringInfo Info;
+  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+  Info.Callee = MachineOperand::CreateES(Name);
+  Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext())};
+  if (!CLI->lowerCall(MIRBuilder, Info)) {
+    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
+    return false;
+  }
+
+  // On PS4, the "return address" must still be within the calling function,
+  // even if it's at the very end, so emit an explicit TRAP here.
+  // Passing 'true' for doesNotReturn above won't generate the trap for us.
+  // WebAssembly needs an unreachable instruction after a non-returning call,
+  // because the function return type can be different from __stack_chk_fail's
+  // return type (void).
+  const TargetMachine &TM = MF->getTarget();
+  if (TM.getTargetTriple().isPS4CPU() || TM.getTargetTriple().isWasm()) {
+    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
+    return false;
+  }
+  return true;
 }
 
 void IRTranslator::finalizeFunction() {
@@ -3002,6 +3178,7 @@
   EntryBuilder.reset();
   CurBuilder.reset();
   FuncInfo.clear();
+  SPDescriptor.resetPerFunctionState();
 }
 
 /// Returns true if a BasicBlock \p BB within a variadic function contains a
@@ -3188,7 +3365,8 @@
       return false;
     }
 
-    finalizeBasicBlock();
+    if (!finalizeBasicBlock(*BB, MBB))
+      return false;
   }
 #ifndef NDEBUG
   WrapperObserver.removeObserver(&Verifier);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/CodeGenCommonISel.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/SwitchLoweringUtils.h"
@@ -180,204 +181,6 @@
       SwitchCG::CaseClusterVector &Clusters,
       BranchProbability &PeeledCaseProb);
 
-  /// A class which encapsulates all of the information needed to generate a
-  /// stack protector check and signals to isel via its state being initialized
-  /// that a stack protector needs to be generated.
-  ///
-  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
-  /// Protector Generation. The reason that it is placed here is for a lack of
-  /// other good places to stick it.
-  ///
-  /// High Level Overview of SelectionDAG Stack Protector Generation:
-  ///
-  /// Previously, generation of stack protectors was done exclusively in the
-  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
-  /// splitting basic blocks at the IR level to create the success/failure basic
-  /// blocks in the tail of the basic block in question. As a result of this,
-  /// calls that would have qualified for the sibling call optimization were no
-  /// longer eligible for optimization since said calls were no longer right in
-  /// the "tail position" (i.e. the immediate predecessor of a ReturnInst
-  /// instruction).
-  ///
-  /// Then it was noticed that since the sibling call optimization causes the
-  /// callee to reuse the caller's stack, if we could delay the generation of
-  /// the stack protector check until later in CodeGen after the sibling call
-  /// decision was made, we get both the tail call optimization and the stack
-  /// protector check!
-  ///
-  /// A few goals in solving this problem were:
-  ///
-  ///   1. Preserve the architecture independence of stack protector generation.
-  ///
-  ///   2. Preserve the normal IR level stack protector check for platforms like
-  ///      OpenBSD for which we support platform-specific stack protector
-  ///      generation.
-  ///
-  /// The main problem that guided the present solution is that one can not
-  /// solve this problem in an architecture independent manner at the IR level
-  /// only. This is because:
-  ///
-  ///   1. The decision on whether or not to perform a sibling call on certain
-  ///      platforms (for instance i386) requires lower level information
-  ///      related to available registers that can not be known at the IR level.
-  ///
-  ///   2.
Even if the previous point were not true, the decision on whether to
-  ///      perform a tail call is done in LowerCallTo in SelectionDAG which
-  ///      occurs after the Stack Protector Pass. As a result, one would need to
-  ///      put the relevant callinst into the stack protector check success
-  ///      basic block (where the return inst is placed) and then move it back
-  ///      later at SelectionDAG/MI time before the stack protector check if the
-  ///      tail call optimization failed. The MI level option was nixed
-  ///      immediately since it would require platform-specific pattern
-  ///      matching. The SelectionDAG level option was nixed because
-  ///      SelectionDAG only processes one IR level basic block at a time
-  ///      implying one could not create a DAG Combine to move the callinst.
-  ///
-  /// To get around this problem a few things were realized:
-  ///
-  ///   1. While one can not handle multiple IR level basic blocks at the
-  ///      SelectionDAG Level, one can generate multiple machine basic blocks
-  ///      for one IR level basic block. This is how we handle bit tests and
-  ///      switches.
-  ///
-  ///   2. At the MI level, tail calls are represented via a special return
-  ///      MIInst called "tcreturn". Thus if we know the basic block in which we
-  ///      wish to insert the stack protector check, we get the correct behavior
-  ///      by always inserting the stack protector check right before the return
-  ///      statement. This is a "magical transformation" since no matter where
-  ///      the stack protector check intrinsic is, we always insert the stack
-  ///      protector check code at the end of the BB.
-  ///
-  /// Given the aforementioned constraints, the following solution was devised:
-  ///
-  ///   1. On platforms that do not support SelectionDAG stack protector check
-  ///      generation, allow for the normal IR level stack protector check
-  ///      generation to continue.
-  ///
-  ///   2. On platforms that do support SelectionDAG stack protector check
-  ///      generation:
-  ///
-  ///     a. Use the IR level stack protector pass to decide if a stack
-  ///        protector is required/which BB we insert the stack protector check
-  ///        in by reusing the logic already therein. If we wish to generate a
-  ///        stack protector check in a basic block, we place a special IR
-  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
-  ///        returninst or if there is a callinst that could potentially be
-  ///        sibling call optimized, before the call inst.
-  ///
-  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
-  ///        normally via SelectBasicBlock. In said process, when we visit the
-  ///        stack protector check, we do not actually emit anything into the
-  ///        BB. Instead, we just initialize the stack protector descriptor
-  ///        class (which involves stashing information/creating the success
-  ///        mbbb and the failure mbb if we have not created one for this
-  ///        function yet) and export the guard variable that we are going to
-  ///        compare.
-  ///
-  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
-  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
-  ///        initialized, we produce the validation code with one of these
-  ///        techniques:
-  ///          1) with a call to a guard check function
-  ///          2) with inlined instrumentation
-  ///
-  ///        1) We insert a call to the check function before the terminator.
-  ///
-  ///        2) We first find a splice point in the parent basic block
-  ///        before the terminator and then splice the terminator of said basic
-  ///        block into the success basic block.
Then we code-gen a new tail for
-  ///        the parent basic block consisting of the two loads, the comparison,
-  ///        and finally two branches to the success/failure basic blocks. We
-  ///        conclude by code-gening the failure basic block if we have not
-  ///        code-gened it already (all stack protector checks we generate in
-  ///        the same function, use the same failure basic block).
-  class StackProtectorDescriptor {
-  public:
-    StackProtectorDescriptor() = default;
-
-    /// Returns true if all fields of the stack protector descriptor are
-    /// initialized implying that we should/are ready to emit a stack protector.
-    bool shouldEmitStackProtector() const {
-      return ParentMBB && SuccessMBB && FailureMBB;
-    }
-
-    bool shouldEmitFunctionBasedCheckStackProtector() const {
-      return ParentMBB && !SuccessMBB && !FailureMBB;
-    }
-
-    /// Initialize the stack protector descriptor structure for a new basic
-    /// block.
-    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
-                    bool FunctionBasedInstrumentation) {
-      // Make sure we are not initialized yet.
-      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
-                                            "already initialized!");
-      ParentMBB = MBB;
-      if (!FunctionBasedInstrumentation) {
-        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
-        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
-      }
-    }
-
-    /// Reset state that changes when we handle different basic blocks.
-    ///
-    /// This currently includes:
-    ///
-    /// 1. The specific basic block we are generating a
-    /// stack protector for (ParentMBB).
-    ///
-    /// 2. The successor machine basic block that will contain the tail of
-    /// parent mbb after we create the stack protector check (SuccessMBB). This
-    /// BB is visited only on stack protector check success.
-    void resetPerBBState() {
-      ParentMBB = nullptr;
-      SuccessMBB = nullptr;
-    }
-
-    /// Reset state that only changes when we switch functions.
-    ///
-    /// This currently includes:
-    ///
-    /// 1. FailureMBB since we reuse the failure code path for all stack
-    /// protector checks created in an individual function.
-    ///
-    /// 2.The guard variable since the guard variable we are checking against is
-    /// always the same.
-    void resetPerFunctionState() {
-      FailureMBB = nullptr;
-    }
-
-    MachineBasicBlock *getParentMBB() { return ParentMBB; }
-    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
-    MachineBasicBlock *getFailureMBB() { return FailureMBB; }
-
-  private:
-    /// The basic block for which we are generating the stack protector.
-    ///
-    /// As a result of stack protector generation, we will splice the
-    /// terminators of this basic block into the successor mbb SuccessMBB and
-    /// replace it with a compare/branch to the successor mbbs
-    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
-    /// was violated.
-    MachineBasicBlock *ParentMBB = nullptr;
-
-    /// A basic block visited on stack protector check success that contains the
-    /// terminators of ParentMBB.
-    MachineBasicBlock *SuccessMBB = nullptr;
-
-    /// This basic block visited on stack protector check failure that will
-    /// contain a call to __stack_chk_fail().
-    MachineBasicBlock *FailureMBB = nullptr;
-
-    /// Add a successor machine basic block to ParentMBB. If the successor mbb
-    /// has not been created yet (i.e. if SuccMBB = 0), then the machine basic
-    /// block will be created. Assign a large weight if IsLikely is true.
-    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
-                                       MachineBasicBlock *ParentMBB,
-                                       bool IsLikely,
-                                       MachineBasicBlock *SuccMBB = nullptr);
-  };
-
 private:
   const TargetMachine &TM;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -10273,27 +10273,6 @@
   ConstantsOut.clear();
 }
 
-/// Add a successor MBB to ParentMBB< creating a new MachineBB for BB if SuccMBB
-/// is 0.
-MachineBasicBlock *
-SelectionDAGBuilder::StackProtectorDescriptor::
-AddSuccessorMBB(const BasicBlock *BB,
-                MachineBasicBlock *ParentMBB,
-                bool IsLikely,
-                MachineBasicBlock *SuccMBB) {
-  // If SuccBB has not been created yet, create it.
-  if (!SuccMBB) {
-    MachineFunction *MF = ParentMBB->getParent();
-    MachineFunction::iterator BBI(ParentMBB);
-    SuccMBB = MF->CreateMachineBasicBlock(BB);
-    MF->insert(++BBI, SuccMBB);
-  }
-  // Add it as a successor of ParentMBB.
-  ParentMBB->addSuccessor(
-      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
-  return SuccMBB;
-}
-
 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
   MachineFunction::iterator I(MBB);
   if (++I == FuncInfo.MF->end())
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -33,6 +33,7 @@
 #include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/CodeGenCommonISel.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/GCMetadata.h"
@@ -1627,89 +1628,6 @@
   SDB->SPDescriptor.resetPerFunctionState();
 }
 
-/// Given that the input MI is before a partial terminator sequence TSeq, return
-/// true if M + TSeq also a partial terminator sequence.
-///
-/// A Terminator sequence is a sequence of MachineInstrs which at this point in
-/// lowering copy vregs into physical registers, which are then passed into
-/// terminator instructors so we can satisfy ABI constraints. A partial
-/// terminator sequence is an improper subset of a terminator sequence (i.e. it
-/// may be the whole terminator sequence).
-static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
-  // If we do not have a copy or an implicit def, we return true if and only if
-  // MI is a debug value.
-  if (!MI.isCopy() && !MI.isImplicitDef())
-    // Sometimes DBG_VALUE MI sneak in between the copies from the vregs to the
-    // physical registers if there is debug info associated with the terminator
-    // of our mbb. We want to include said debug info in our terminator
-    // sequence, so we return true in that case.
-    return MI.isDebugValue();
-
-  // We have left the terminator sequence if we are not doing one of the
-  // following:
-  //
-  // 1. Copying a vreg into a physical register.
-  // 2. Copying a vreg into a vreg.
-  // 3. Defining a register via an implicit def.
-
-  // OPI should always be a register definition...
-  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
-  if (!OPI->isReg() || !OPI->isDef())
-    return false;
-
-  // Defining any register via an implicit def is always ok.
-  if (MI.isImplicitDef())
-    return true;
-
-  // Grab the copy source...
-  MachineInstr::const_mop_iterator OPI2 = OPI;
-  ++OPI2;
-  assert(OPI2 != MI.operands_end()
-         && "Should have a copy implying we should have 2 arguments.");
-
-  // Make sure that the copy dest is not a vreg when the copy source is a
-  // physical register.
-  if (!OPI2->isReg() || (!Register::isPhysicalRegister(OPI->getReg()) &&
-                         Register::isPhysicalRegister(OPI2->getReg())))
-    return false;
-
-  return true;
-}
-
-/// Find the split point at which to splice the end of BB into its success stack
-/// protector check machine basic block.
-///
-/// On many platforms, due to ABI constraints, terminators, even before register
-/// allocation, use physical registers. This creates an issue for us since
-/// physical registers at this point can not travel across basic
-/// blocks. Luckily, selectiondag always moves physical registers into vregs
-/// when they enter functions and moves them through a sequence of copies back
-/// into the physical registers right before the terminator creating a
-/// ``Terminator Sequence''. This function is searching for the beginning of the
-/// terminator sequence so that we can ensure that we splice off not just the
-/// terminator, but additionally the copies that move the vregs into the
-/// physical registers.
-static MachineBasicBlock::iterator
-FindSplitPointForStackProtector(MachineBasicBlock *BB) {
-  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
-  //
-  if (SplitPoint == BB->begin())
-    return SplitPoint;
-
-  MachineBasicBlock::iterator Start = BB->begin();
-  MachineBasicBlock::iterator Previous = SplitPoint;
-  --Previous;
-
-  while (MIIsInTerminatorSequence(*Previous)) {
-    SplitPoint = Previous;
-    if (Previous == Start)
-      break;
-    --Previous;
-  }
-
-  return SplitPoint;
-}
-
 void SelectionDAGISel::FinishBasicBlock() {
 
   LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -437,9 +437,8 @@
   // protection in SDAG.
   bool SupportsSelectionDAGSP =
       TLI->useStackGuardXorFP() ||
-      (EnableSelectionDAGSP && !TM->Options.EnableFastISel &&
-       !TM->Options.EnableGlobalISel);
-  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
+      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
+  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
   for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
     BasicBlock *BB = &*I++;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll
new file mode
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-apple-ios %s -stop-after=irtranslator -o - -global-isel | FileCheck %s
+
+define void @caller() sspreq {
+  ; CHECK-LABEL: name: caller
+  ; CHECK: bb.1.entry:
+  ; CHECK:   successors: %bb.2(0x7ffff800), %bb.3(0x00000800)
+  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
+  ; CHECK:   [[LOAD_STACK_GUARD:%[0-9]+]]:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
+  ; CHECK:   [[LOAD_STACK_GUARD1:%[0-9]+]]:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
+  ; CHECK:   G_STORE [[LOAD_STACK_GUARD1]](p0), [[FRAME_INDEX]](p0) :: (volatile store 8 into %stack.0.StackGuardSlot)
+  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.x
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+  ; CHECK:   $x0 = COPY [[FRAME_INDEX1]](p0)
+  ; CHECK:   BL @callee, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+  ; CHECK:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX2]](p0) :: (volatile load 8 from %stack.0.StackGuardSlot)
+  ; CHECK:   [[LOAD_STACK_GUARD2:%[0-9]+]]:gpr64sp(s64) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
+  ; CHECK:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LOAD_STACK_GUARD2]](s64), [[LOAD]]
+  ; CHECK:   G_BRCOND [[ICMP]](s1), %bb.3
+  ; CHECK:   G_BR %bb.2
+  ; CHECK: bb.3.entry:
+  ; CHECK:   successors:
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+  ; CHECK:   BL &__stack_chk_fail, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+  ; CHECK: bb.2.entry:
+  ; CHECK:   RET_ReallyLR
+entry:
+  %x = alloca i32, align 4
+  %0 = bitcast i32* %x to i8*
+  call void @callee(i32* nonnull %x)
+  ret void
+}
+
+declare void @callee(i32*)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stackprotect-check.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stackprotect-check.ll
deleted file mode
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stackprotect-check.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc -O0 -stop-before=irtranslator -global-isel %s -o - | FileCheck %s
-; RUN: llc -O0 -stop-after=irtranslator -verify-machineinstrs -global-isel %s -o - | FileCheck --check-prefixes CHECK,CHECK-MIR %s
-
-; Check that when using GlobalISel, the StackProtector pass currently inserts
-; both prologue and epilogue instrumentation because GlobalISel does not have
-; the same epilogue insertion/optimization as SelectionDAG.
-
-target triple = "aarch64-none-unknown-eabi"
-
-define void @foo() ssp {
-; CHECK-LABEL: entry:
-; CHECK-NEXT: %StackGuardSlot = alloca i8*
-; CHECK-NEXT: %0 = call i8* @llvm.stackguard()
-; CHECK-NEXT: call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot)
-; CHECK-NEXT: %buf = alloca [8 x i8], align 1
-; CHECK-NEXT: %1 = call i8* @llvm.stackguard()
-; CHECK-NEXT: %2 = load volatile i8*, i8** %StackGuardSlot
-; CHECK-NEXT: %3 = icmp eq i8* %1, %2
-; CHECK-NEXT: br i1 %3, label %SP_return, label %CallStackCheckFailBlk, !prof !0
-;
-; CHECK: SP_return:
-; CHECK-NEXT: ret void
-;
-; CHECK: CallStackCheckFailBlk:
-; CHECK-NEXT: call void @__stack_chk_fail()
-; CHECK-NEXT: unreachable
-
-; CHECK-MIR: bb.1.entry:
-; CHECK-MIR: %0:_(p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
-; CHECK-MIR-NEXT: %1:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
-; CHECK-MIR-NEXT: %2:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
-; CHECK-MIR-NEXT: G_STORE %2(p0), %0(p0) :: (volatile store 8 into %stack.0.StackGuardSlot)
-; CHECK-MIR-NEXT: %3:_(p0) = G_FRAME_INDEX %stack.1.buf
-; CHECK-MIR-NEXT: %4:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
-; CHECK-MIR-NEXT: %5:_(p0) = G_LOAD %0(p0) :: (volatile dereferenceable load 8 from %ir.StackGuardSlot)
-; CHECK-MIR-NEXT: %6:_(s1) = G_ICMP intpred(eq), %4(p0), %5
-; CHECK-MIR-NEXT: G_BRCOND %6(s1), %bb.2
-; CHECK-MIR-NEXT: G_BR %bb.3
-;
-; CHECK-MIR: bb.2.SP_return:
-; CHECK-MIR-NEXT: RET_ReallyLR
-;
-; CHECK-MIR: bb.3.CallStackCheckFailBlk:
-; CHECK-MIR-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-; CHECK-MIR-NEXT: BL @__stack_chk_fail, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
-; CHECK-MIR-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-entry:
-  %buf = alloca [8 x i8], align 1
-  ret void
-}