Index: docs/LangRef.rst
===================================================================
--- docs/LangRef.rst
+++ docs/LangRef.rst
@@ -11955,6 +11955,41 @@
 different, then ``llvm.stackprotectorcheck`` causes the program to abort by
 calling the ``__stack_chk_fail()`` function.
 
+'``llvm.stackguard``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare i8* @llvm.stackguard()
+
+Overview:
+"""""""""
+
+The ``llvm.stackguard`` intrinsic returns the system stack guard value.
+
+Frontends should not generate this intrinsic; it is intended only for
+internal use. It exists because FastISel still supports the IR-level form
+of the stack protector.
+
+Arguments:
+""""""""""
+
+None.
+
+Semantics:
+""""""""""
+
+On some platforms, the value returned by this intrinsic remains unchanged
+between loads in the same thread. On other platforms, it returns the value
+of the guard global variable, if one exists, e.g. ``@__stack_chk_guard``.
+
+Some platforms currently customize the stack guard load at the IR level
+(e.g. X86 Linux); these are not yet handled by ``llvm.stackguard()``, but
+should be in the future.
+
 '``llvm.objectsize``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
Index: include/llvm/IR/Intrinsics.td
===================================================================
--- include/llvm/IR/Intrinsics.td
+++ include/llvm/IR/Intrinsics.td
@@ -324,6 +324,7 @@
 // Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
 // guard to the correct place on the stack frame.
 def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackguard : Intrinsic<[llvm_ptr_ty], [], []>;
 
 // A counter increment for instrumentation based profiling.
 def int_instrprof_increment : Intrinsic<[],
Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -1015,6 +1015,8 @@
   /// If the target has a standard location for the stack protector guard,
   /// returns the address of that location. Otherwise, returns nullptr.
+  /// DEPRECATED: please override useLoadStackGuardNode and customize
+  /// LOAD_STACK_GUARD, or customize @llvm.stackguard().
   virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
 
   /// Inserts necessary declarations for SSP purpose. Should be used only when
Index: lib/CodeGen/MachineCSE.cpp
===================================================================
--- lib/CodeGen/MachineCSE.cpp
+++ lib/CodeGen/MachineCSE.cpp
@@ -352,6 +352,12 @@
     // This is a trivial form of alias analysis.
     return false;
   }
+
+  // Ignore stack guard loads; otherwise the register that holds the CSE'd
+  // value may be spilled and reloaded with corrupted data.
+  if (MI->getOpcode() == TargetOpcode::LOAD_STACK_GUARD)
+    return false;
+
   return true;
 }
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -465,8 +465,7 @@
   class StackProtectorDescriptor {
   public:
     StackProtectorDescriptor()
-        : ParentMBB(nullptr), SuccessMBB(nullptr), FailureMBB(nullptr),
-          GuardReg(0) {}
+        : ParentMBB(nullptr), SuccessMBB(nullptr), FailureMBB(nullptr) {}
 
     /// Returns true if all fields of the stack protector descriptor are
     /// initialized implying that we should/are ready to emit a stack protector.
@@ -511,16 +510,12 @@
     /// always the same.
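+    /// The guard value itself is not part of this per-function state: it is
+    /// rematerialized at each use via LOAD_STACK_GUARD or @llvm.stackguard()
+    /// rather than kept live in a virtual register.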
     void resetPerFunctionState() {
       FailureMBB = nullptr;
-      GuardReg = 0;
     }
 
     MachineBasicBlock *getParentMBB() { return ParentMBB; }
     MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
     MachineBasicBlock *getFailureMBB() { return FailureMBB; }
 
-    unsigned getGuardReg() const { return GuardReg; }
-    void setGuardReg(unsigned R) { GuardReg = R; }
-
   private:
     /// The basic block for which we are generating the stack protector.
     ///
@@ -539,9 +534,6 @@
     /// contain a call to __stack_chk_fail().
     MachineBasicBlock *FailureMBB;
 
-    /// The virtual register holding the stack guard value.
-    unsigned GuardReg;
-
     /// Add a successor machine basic block to ParentMBB. If the successor mbb
     /// has not been created yet (i.e. if SuccMBB = 0), then the machine basic
     /// block will be created. Assign a large weight if IsLikely is true.
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1998,6 +1998,26 @@
   DAG.setRoot(BrCond);
 }
 
+/// Create a LOAD_STACK_GUARD node, and let it carry the target-specific
+/// global variable, if one exists.
+static SDValue getLoadStackGuard(SelectionDAG &DAG, SDLoc DL, SDValue &Chain) {
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
+  MachineFunction &MF = DAG.getMachineFunction();
+  Value *Global = TLI.getSDStackGuard(*MF.getFunction()->getParent());
+  MachineSDNode *Node =
+      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
+  if (Global) {
+    MachinePointerInfo MPInfo(Global);
+    MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
+    unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+    *MemRefs = MF.getMachineMemOperand(MPInfo, Flags, PtrTy.getSizeInBits() / 8,
+                                       DAG.getEVTAlignment(PtrTy));
+    Node->setMemRefs(MemRefs, MemRefs + 1);
+  }
+  return SDValue(Node, 0);
+}
+
 /// Codegen a new tail for a stack protector check ParentMBB which has had its
 /// tail spliced into a stack protector check success bb.
 ///
@@ -2026,18 +2046,15 @@
   SDValue Guard;
   SDLoc dl = getCurSDLoc();
 
-  // If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
-  // guard value from the virtual register holding the value. Otherwise, emit a
-  // volatile load to retrieve the stack guard value.
-  unsigned GuardReg = SPD.getGuardReg();
-
-  if (GuardReg && TLI.useLoadStackGuardNode())
-    Guard = DAG.getCopyFromReg(DAG.getEntryNode(), dl, GuardReg,
-                               PtrTy);
+  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
+  // Otherwise, emit a volatile load to retrieve the stack guard value.
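+  // LOAD_STACK_GUARD is a pseudo-instruction that the target expands into its
+  // own guard load. Re-emitting it here, instead of forwarding the prologue
+  // value through a virtual register, keeps the guard out of spill slots that
+  // the very overflow being detected could corrupt.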
+  SDValue Chain = DAG.getEntryNode();
+  if (TLI.useLoadStackGuardNode())
+    Guard = getLoadStackGuard(DAG, dl, Chain);
   else
-    Guard = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
-                        GuardPtr, MachinePointerInfo(IRGuard, 0),
-                        true, false, false, Align);
+    Guard =
+        DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
+                    true, false, false, Align);
 
   SDValue StackSlot = DAG.getLoad(
       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
@@ -5288,47 +5305,35 @@
     setValue(&I, Res);
     return nullptr;
   }
+  case Intrinsic::stackguard: {
+    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
+    MachineFunction &MF = DAG.getMachineFunction();
+    const Module &M = *MF.getFunction()->getParent();
+    SDValue Chain = getRoot();
+    if (TLI.useLoadStackGuardNode()) {
+      Res = getLoadStackGuard(DAG, sdl, Chain);
+    } else {
+      const Value *Global = TLI.getSDStackGuard(M);
+      unsigned Align = DL->getPrefTypeAlignment(Global->getType());
+      Res =
+          DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
+                      MachinePointerInfo(Global, 0), true, false, false, Align);
+    }
+    DAG.setRoot(Chain);
+    setValue(&I, Res);
+    return nullptr;
+  }
   case Intrinsic::stackprotector: {
     // Emit code into the DAG to store the stack guard onto the stack.
     MachineFunction &MF = DAG.getMachineFunction();
     MachineFrameInfo *MFI = MF.getFrameInfo();
     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
     SDValue Src, Chain = getRoot();
-    const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand();
-    const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
-
-    // See if Ptr is a bitcast. If it is, look through it and see if we can get
-    // global variable __stack_chk_guard.
-    if (!GV)
-      if (const Operator *BC = dyn_cast<Operator>(Ptr))
-        if (BC->getOpcode() == Instruction::BitCast)
-          GV = dyn_cast<GlobalVariable>(BC->getOperand(0));
-
-    if (GV && TLI.useLoadStackGuardNode()) {
-      // Emit a LOAD_STACK_GUARD node.
-      MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
-                                               sdl, PtrTy, Chain);
-      MachinePointerInfo MPInfo(GV);
-      MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
-      unsigned Flags = MachineMemOperand::MOLoad |
-                       MachineMemOperand::MOInvariant;
-      *MemRefs = MF.getMachineMemOperand(MPInfo, Flags,
-                                         PtrTy.getSizeInBits() / 8,
-                                         DAG.getEVTAlignment(PtrTy));
-      Node->setMemRefs(MemRefs, MemRefs + 1);
-
-      // Copy the guard value to a virtual register so that it can be
-      // retrieved in the epilogue.
-      Src = SDValue(Node, 0);
-      const TargetRegisterClass *RC =
-          TLI.getRegClassFor(Src.getSimpleValueType());
-      unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
-
-      SPDescriptor.setGuardReg(Reg);
-      Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
-    } else {
+
+    if (TLI.useLoadStackGuardNode())
+      Src = getLoadStackGuard(DAG, sdl, Chain);
+    else
       Src = getValue(I.getArgOperand(0));   // The guard's value.
-    }
 
     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
Index: lib/CodeGen/StackProtector.cpp
===================================================================
--- lib/CodeGen/StackProtector.cpp
+++ lib/CodeGen/StackProtector.cpp
@@ -271,36 +271,41 @@
   return NeedsProtector;
 }
 
-/// Insert code into the entry block that stores the __stack_chk_guard
+/// Create a stack guard load and record whether SelectionDAG SSP is
+/// supported.
+static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
+                            IRBuilder<> &B,
+                            bool *SupportsSelectionDAGSP = nullptr) {
+  if (Value *Guard = TLI->getIRStackGuard(B))
+    return B.CreateLoad(Guard, true, "StackGuard");
+
+  // Use SelectionDAG SSP handling, since there isn't an IR guard.
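+  // The @llvm.stackguard() call created below is lowered by
+  // SelectionDAGBuilder into either a LOAD_STACK_GUARD node or a volatile
+  // load of the target's guard variable.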
+  if (SupportsSelectionDAGSP)
+    *SupportsSelectionDAGSP = true;
+  TLI->insertSSPDeclarations(*M);
+  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
+}
+
+/// Insert code into the entry block that stores the stack guard
 /// variable onto the stack:
 ///
 ///   entry:
 ///     StackGuardSlot = alloca i8*
-///     StackGuard = load __stack_chk_guard
-///     call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
+///     StackGuard = <stack guard>
+///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
 ///
 /// Returns true if the platform/triple supports the stackprotectorcreate pseudo
 /// node.
 static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
-                           const TargetLoweringBase *TLI, AllocaInst *&AI,
-                           Value *&StackGuardVar) {
+                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
   bool SupportsSelectionDAGSP = false;
   IRBuilder<> B(&F->getEntryBlock().front());
-
-  StackGuardVar = TLI->getIRStackGuard(B);
-  if (!StackGuardVar) {
-    /// Use SelectionDAG SSP handling, since there isn't an IR guard.
-    SupportsSelectionDAGSP = true;
-    TLI->insertSSPDeclarations(*M);
-    StackGuardVar = TLI->getSDStackGuard(*M);
-  }
-  assert(StackGuardVar && "Must have stack guard available");
-
   PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
   AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
-  LoadInst *LI = B.CreateLoad(StackGuardVar, "StackGuard");
+
+  Value *Guard = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
   B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
-               {LI, AI});
+               {Guard, AI});
 
   return SupportsSelectionDAGSP;
 }
@@ -314,7 +319,6 @@
   bool SupportsSelectionDAGSP =
       EnableSelectionDAGSP && !TM->Options.EnableFastISel;
   AllocaInst *AI = nullptr;       // Place on stack that stores the stack guard.
-  Value *StackGuardVar = nullptr; // The stack guard variable.
 
   for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
     BasicBlock *BB = &*I++;
@@ -324,8 +328,7 @@
 
     if (!HasPrologue) {
       HasPrologue = true;
-      SupportsSelectionDAGSP &=
-          CreatePrologue(F, M, RI, TLI, AI, StackGuardVar);
+      SupportsSelectionDAGSP &= CreatePrologue(F, M, RI, TLI, AI);
     }
 
     if (!SupportsSelectionDAGSP) {
@@ -342,7 +345,7 @@
       //
       //   return:
       //     ...
-      //     %1 = load __stack_chk_guard
+      //     %1 = <stack guard>
       //     %2 = load StackGuardSlot
       //     %3 = cmp i1 %1, %2
       //     br i1 %3, label %SP_return, label %CallStackCheckFailBlk
@@ -381,9 +384,9 @@
 
       // Generate the stack protector instructions in the old basic block.
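+      // Both loads in the generated code are volatile: the guard is re-read
+      // from its canonical location and the slot is re-read from the stack,
+      // so neither load can be folded away or reordered across this check.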
       IRBuilder<> B(BB);
-      LoadInst *LI1 = B.CreateLoad(StackGuardVar);
-      LoadInst *LI2 = B.CreateLoad(AI);
-      Value *Cmp = B.CreateICmpEQ(LI1, LI2);
+      Value *Guard = getStackGuard(TLI, M, B);
+      LoadInst *LI2 = B.CreateLoad(AI, true);
+      Value *Cmp = B.CreateICmpEQ(Guard, LI2);
       auto SuccessProb =
           BranchProbabilityInfo::getBranchProbStackProtector(true);
       auto FailureProb =
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10115,7 +10115,9 @@
 }
 
 bool AArch64TargetLowering::useLoadStackGuardNode() const {
-  return true;
+  if (!Subtarget->isTargetAndroid())
+    return true;
+  return TargetLowering::useLoadStackGuardNode();
 }
 
 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -2068,14 +2068,12 @@
 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
   if (!Subtarget.isTargetLinux())
     TargetLowering::insertSSPDeclarations(M);
-  else
-    llvm_unreachable("X86 Linux supports customized IR stack guard load");
 }
 
 Value *X86TargetLowering::getSDStackGuard(const Module &M) const {
   if (!Subtarget.isTargetLinux())
     return TargetLowering::getSDStackGuard(M);
-  llvm_unreachable("X86 Linux supports customized IR stack guard load");
+  return nullptr;
 }
 
 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
Index: test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
===================================================================
--- test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
+++ test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
@@ -6,10 +6,13 @@
 
 ; CHECK: adrp [[R0:x[0-9]+]], ___stack_chk_guard@GOTPAGE
 ; CHECK: ldr [[R1:x[0-9]+]], {{\[}}[[R0]], ___stack_chk_guard@GOTPAGEOFF{{\]}}
+; CHECK: adrp [[GUARD_PAGE:x[0-9]+]], ___stack_chk_guard@GOTPAGE
 ; CHECK: ldr [[R2:x[0-9]+]], {{\[}}[[R1]]{{\]}}
 ; CHECK: stur [[R2]], {{\[}}x29, [[SLOT0:[0-9#\-]+]]{{\]}}
 ; CHECK: ldur [[R3:x[0-9]+]], {{\[}}x29, [[SLOT0]]{{\]}}
-; CHECK: sub [[R4:x[0-9]+]], [[R2]], [[R3]]
+; CHECK: ldr [[GUARD_ADDR:x[0-9]+]], {{\[}}[[GUARD_PAGE]], ___stack_chk_guard@GOTPAGEOFF{{\]}}
+; CHECK: ldr [[GUARD:x[0-9]+]], {{\[}}[[GUARD_ADDR]]{{\]}}
+; CHECK: sub [[R4:x[0-9]+]], [[GUARD]], [[R3]]
 ; CHECK: cbnz [[R4]], LBB
 
 define i32 @test_stack_guard_remat2() {
Index: test/CodeGen/ARM/warn-stack.ll
===================================================================
--- test/CodeGen/ARM/warn-stack.ll
+++ test/CodeGen/ARM/warn-stack.ll
@@ -12,7 +12,7 @@
   ret void
 }
 
-; CHECK: warning: stack size limit exceeded (96) in warn
+; CHECK: warning: stack size limit exceeded (92) in warn
 define void @warn() nounwind ssp {
 entry:
   %buffer = alloca [80 x i8], align 1
Index: test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
===================================================================
--- test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
+++ test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
@@ -12,7 +12,7 @@
 ;   movq %rax, %rsp
 
 ; CHECK-LABEL: @foo
-; CHECK: movq -40(%rbp), %rsp
+; CHECK: movq -{{[0-9]+}}(%rbp), %rsp
 
 entry:
   %s1_addr = alloca i8*           ; <i8*> [#uses=2]
Index: test/CodeGen/X86/StackColoring.ll
===================================================================
--- test/CodeGen/X86/StackColoring.ll
+++ test/CodeGen/X86/StackColoring.ll
@@ -243,8 +243,8 @@
 
 ; Adopt the test from Transforms/Inline/array_merge.ll
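+; The expected frame sizes below are 8 bytes smaller than before: the stack
+; guard is no longer kept live in a virtual register across the function, so
+; its spill slot goes away.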
 ;CHECK-LABEL: array_merge:
-;YESCOLOR: subq $816, %rsp
-;NOCOLOR: subq $1616, %rsp
+;YESCOLOR: subq $808, %rsp
+;NOCOLOR: subq $1608, %rsp
 define void @array_merge() nounwind ssp {
 entry:
   %A.i1 = alloca [100 x i32], align 4
Index: test/CodeGen/X86/dynamic-allocas-VLAs.ll
===================================================================
--- test/CodeGen/X86/dynamic-allocas-VLAs.ll
+++ test/CodeGen/X86/dynamic-allocas-VLAs.ll
@@ -60,12 +60,10 @@
 ; CHECK: _t3
 ; CHECK: pushq %rbp
 ; CHECK: movq %rsp, %rbp
-; CHECK: pushq %rbx
 ; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
 ; CHECK: subq ${{[0-9]+}}, %rsp
 ;
-; CHECK: leaq -{{[0-9]+}}(%rbp), %rsp
-; CHECK: popq %rbx
+; CHECK: movq %rbp, %rsp
 ; CHECK: popq %rbp
 }
 
@@ -85,7 +83,6 @@
 ; CHECK: _t4
 ; CHECK: pushq %rbp
 ; CHECK: movq %rsp, %rbp
-; CHECK: pushq %r14
 ; CHECK: pushq %rbx
 ; CHECK: andq $-32, %rsp
 ; CHECK: subq ${{[0-9]+}}, %rsp
@@ -95,9 +92,8 @@
 ; CHECK: leaq {{[0-9]*}}(%rbx), %rdx
 ; CHECK: callq _t4_helper
 ;
-; CHECK: leaq -16(%rbp), %rsp
+; CHECK: leaq -{{[0-9]+}}(%rbp), %rsp
 ; CHECK: popq %rbx
-; CHECK: popq %r14
 ; CHECK: popq %rbp
 }
Index: test/CodeGen/X86/fast-isel-stackcheck.ll
===================================================================
--- test/CodeGen/X86/fast-isel-stackcheck.ll
+++ test/CodeGen/X86/fast-isel-stackcheck.ll
@@ -8,7 +8,7 @@
 
 ; CHECK-LABEL: foo:
 ; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
-; CHECK-NOT: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
+; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
 define void @foo() #0 {
 entry:
   %_tags = alloca [3 x i32], align 4
@@ -16,8 +16,10 @@
 }
 
 ; CHECK-LABEL: bar:
-; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
-; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
+; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %{{r.x}}
+; CHECK-DAG: movq ___stack_chk_guard@GOTPCREL(%rip), %[[GUARD:r.x]]
+; CHECK-DAG: movq {{[0-9]+}}(%rsp), %[[CANARY:r.x]]
+; CHECK: subq %[[CANARY]], %[[GUARD]]
 define void @bar() #1 {
 entry:
   %vt = alloca [2 x double], align 16
Index: test/CodeGen/X86/ssp-guard-spill.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/ssp-guard-spill.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s | FileCheck %s
+target triple = "x86_64-apple-macosx10.4.0"
+
+; bool Bar(int*);
+; bool Foo(int n) {
+;   int a[10];
+;   for (int i = 0; i < n; i++) {
+;     a[i] = 0;
+;   }
+;   return Bar(a);
+; }
+; CHECK: movq ___stack_chk_guard
+; CHECK: movq ___stack_chk_guard
+define zeroext i1 @_Z3Fooi(i32 %n) sspstrong {
+entry:
+  %n.addr = alloca i32, align 4
+  %a = alloca [10 x i32], align 16
+  %i = alloca i32, align 4
+  store i32 %n, i32* %n.addr, align 4
+  store i32 0, i32* %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc, %entry
+  %tmp = load i32, i32* %i, align 4
+  %tmp1 = load i32, i32* %n.addr, align 4
+  %cmp = icmp slt i32 %tmp, %tmp1
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %tmp2 = load i32, i32* %i, align 4
+  %idxprom = sext i32 %tmp2 to i64
+  %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %a, i64 0, i64 %idxprom
+  store i32 0, i32* %arrayidx, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body
+  %tmp3 = load i32, i32* %i, align 4
+  %inc = add nsw i32 %tmp3, 1
+  store i32 %inc, i32* %i, align 4
+  br label %for.cond
+
+for.end:                                          ; preds = %for.cond
+  %arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i32 0
+  %call = call zeroext i1 @_Z3BarPi(i32* %arraydecay)
+  ret i1 %call
+}
+
+declare zeroext i1 @_Z3BarPi(i32*)
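+
+; The two ___stack_chk_guard loads checked above come from the prologue and
+; from the epilogue check: the guard is reloaded for the comparison rather
+; than reused from a register whose spilled copy could have been overwritten.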
Index: test/CodeGen/X86/warn-stack.ll
===================================================================
--- test/CodeGen/X86/warn-stack.ll
+++ test/CodeGen/X86/warn-stack.ll
@@ -12,7 +12,7 @@
   ret void
 }
 
-; CHECK: warning: stack size limit exceeded (104) in warn
+; CHECK: warning: stack size limit exceeded (88) in warn
 define void @warn() nounwind ssp {
 entry:
   %buffer = alloca [80 x i8], align 1
Index: test/DebugInfo/X86/vla.ll
===================================================================
--- test/DebugInfo/X86/vla.ll
+++ test/DebugInfo/X86/vla.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -O0 -mtriple=x86_64-apple-darwin -filetype=asm %s -o - | FileCheck %s
 ; Ensure that we generate an indirect location for the variable length array a.
-; CHECK: ##DEBUG_VALUE: vla:a <- [%RDX+0]
-; CHECK: DW_OP_breg1
+; CHECK: ##DEBUG_VALUE: vla:a <- [%RCX+0]
+; CHECK: DW_OP_breg2
 ; rdar://problem/13658587
 ;
 ; generated from:
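
For reference, below is a minimal sketch of the IR shape the StackProtector
pass emits when the target provides no IR-level guard, so the
@llvm.stackguard() path is taken. The function @f, its block structure, and
the sspreq attribute are illustrative assumptions; only the intrinsic
signatures come from the LangRef entry above.

    ; Hypothetical pass output, not part of this patch.
    declare i8* @llvm.stackguard()
    declare void @llvm.stackprotector(i8*, i8**)
    declare void @__stack_chk_fail()

    define void @f() sspreq {
    entry:
      ; Prologue: load the guard and stash it in a stack slot.
      %StackGuardSlot = alloca i8*
      %guard = call i8* @llvm.stackguard()
      call void @llvm.stackprotector(i8* %guard, i8** %StackGuardSlot)
      br label %return

    return:
      ; Epilogue: reload both values and compare before returning.
      %guard2 = call i8* @llvm.stackguard()
      %canary = load volatile i8*, i8** %StackGuardSlot
      %ok = icmp eq i8* %guard2, %canary
      br i1 %ok, label %SP_return, label %CallStackCheckFailBlk

    SP_return:
      ret void

    CallStackCheckFailBlk:
      call void @__stack_chk_fail()
      unreachable
    }

The second @llvm.stackguard() call is deliberate: reloading the guard for the
comparison, instead of reusing %guard, keeps the value out of spill slots,
which is the same motivation behind excluding LOAD_STACK_GUARD from
MachineCSE above.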