diff --git a/llvm/include/llvm/CodeGen/StackMaps.h b/llvm/include/llvm/CodeGen/StackMaps.h
--- a/llvm/include/llvm/CodeGen/StackMaps.h
+++ b/llvm/include/llvm/CodeGen/StackMaps.h
@@ -148,9 +148,13 @@
 ///   <StackMaps::ConstantOp>, <calling convention>,
 ///   <StackMaps::ConstantOp>, <statepoint flags>,
 ///   <StackMaps::ConstantOp>, <num deopt args>, [deopt args...],
-///
-/// Note that the last two sets of arguments are not currently length
-/// prefixed.
+///   <StackMaps::ConstantOp>, <num gc pointer args>, [gc pointer args...],
+///   <StackMaps::ConstantOp>, <num gc allocas>, [gc allocas args...],
+///   <StackMaps::ConstantOp>, <num entries in gc map>, [base/derived pairs]
+///   base/derived pairs in gc map are logical indices into <gc pointer args>
+///   section.
+///   All gc pointers assigned to VRegs produce new value (in form of MI Def
+///   operand) and are tied to it.
 class StatepointOpers {
   // TODO:: we should change the STATEPOINT representation so that CC and
   // Flags should be part of meta operands, with args and deopt operands, and
@@ -217,6 +221,19 @@
   /// Return the statepoint flags.
   uint64_t getFlags() const { return MI->getOperand(getFlagsIdx()).getImm(); }
 
+  uint64_t getNumDeoptArgs() const {
+    return MI->getOperand(getNumDeoptArgsIdx()).getImm();
+  }
+
+  /// Get index of first GC pointer operand, or -1 if there are none.
+  int getFirstGCPtrIdx();
+
+  /// Get vector of base/derived pairs from statepoint.
+  /// Elements are indices into GC Pointer operand list (logical).
+  /// Returns number of elements in GCMap.
+  unsigned
+  getGCPointerMap(SmallVectorImpl<std::pair<unsigned, unsigned>> &GCMap);
+
 private:
   const MachineInstr *MI;
   unsigned NumDefs;
@@ -263,7 +280,7 @@
   /// Get index of next meta operand.
   /// Similar to parseOperand, but does not actually parse operand meaning.
-  static unsigned getNextMetaArgIdx(MachineInstr *MI, unsigned CurIdx);
+  static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx);
 
   void reset() {
     CSInfos.clear();
@@ -337,6 +354,13 @@
                MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
                LiveOutVec &LiveOuts) const;
 
+  /// Specialized parser of statepoint operands.
+  /// They do not directly correspond to StackMap record entries.
+  void parseStatepointOpers(const MachineInstr &MI,
+                            MachineInstr::const_mop_iterator MOI,
+                            MachineInstr::const_mop_iterator MOE,
+                            LocationVec &Locations, LiveOutVec &LiveOuts);
+
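To make the new operand layout concrete, here is a rough sketch (not part of the patch) of how a consumer could combine the two new accessors with StackMaps::getNextMetaArgIdx to map the logical base/derived indices back to MI operand indices. The helper name visitGCPtrPairs is made up; the walk mirrors what StackMaps::parseStatepointOpers does further down in this patch.

```cpp
// Illustrative sketch only; visitGCPtrPairs is a hypothetical helper.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/StackMaps.h"
#include <utility>
using namespace llvm;

static void visitGCPtrPairs(const MachineInstr &MI) {
  StatepointOpers SO(&MI);
  int FirstGCIdx = SO.getFirstGCPtrIdx();
  if (FirstGCIdx == -1)
    return; // this statepoint has no GC pointer operands

  // <num gc pointer args> is the immediate just before the first GC pointer.
  unsigned NumGCPtrs = MI.getOperand(FirstGCIdx - 1).getImm();

  // Logical GC pointer index -> MI operand index.
  SmallVector<unsigned, 8> OpIdx;
  unsigned Cur = (unsigned)FirstGCIdx;
  while (NumGCPtrs--) {
    OpIdx.push_back(Cur);
    Cur = StackMaps::getNextMetaArgIdx(&MI, Cur);
  }

  // Base/derived pairs are logical indices into the GC pointer section.
  SmallVector<std::pair<unsigned, unsigned>, 8> Pairs;
  SO.getGCPointerMap(Pairs);
  for (const auto &P : Pairs) {
    const MachineOperand &Base = MI.getOperand(OpIdx[P.first]);
    const MachineOperand &Derived = MI.getOperand(OpIdx[P.second]);
    (void)Base;
    (void)Derived; // e.g. hand both to a stack map record
  }
}
```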
   /// Create a live-out register record for the given register @p Reg.
   LiveOutReg createLiveOutReg(unsigned Reg,
                               const TargetRegisterInfo *TRI) const;
diff --git a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
--- a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
+++ b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
@@ -98,48 +98,6 @@
   return TRI.getSpillSize(*RC);
 }
 
-// Advance iterator to the next stack map entry
-static MachineInstr::const_mop_iterator
-advanceToNextStackMapElt(MachineInstr::const_mop_iterator MOI) {
-  if (MOI->isImm()) {
-    switch (MOI->getImm()) {
-    default:
-      llvm_unreachable("Unrecognized operand type.");
-    case StackMaps::DirectMemRefOp:
-      MOI += 2; // <Reg>, <Offset>
-      break;
-    case StackMaps::IndirectMemRefOp:
-      MOI += 3; // <Size>, <Reg>, <Offset>
-      break;
-    case StackMaps::ConstantOp:
-      MOI += 1;
-      break;
-    }
-  }
-  return ++MOI;
-}
-
-// Return statepoint GC args as a set
-static SmallSet<Register, 8> collectGCRegs(MachineInstr &MI) {
-  StatepointOpers SO(&MI);
-  unsigned NumDeoptIdx = SO.getNumDeoptArgsIdx();
-  unsigned NumDeoptArgs = MI.getOperand(NumDeoptIdx).getImm();
-  MachineInstr::const_mop_iterator MOI(MI.operands_begin() + NumDeoptIdx + 1),
-      MOE(MI.operands_end());
-
-  // Skip deopt args
-  while (NumDeoptArgs--)
-    MOI = advanceToNextStackMapElt(MOI);
-
-  SmallSet<Register, 8> Result;
-  while (MOI != MOE) {
-    if (MOI->isReg() && !MOI->isImplicit())
-      Result.insert(MOI->getReg());
-    MOI = advanceToNextStackMapElt(MOI);
-  }
-  return Result;
-}
-
 // Try to eliminate redundant copy to register which we're going to
 // spill, i.e. try to change:
 // X = COPY Y
@@ -411,8 +369,13 @@
   // Also cache the size of found registers.
   // Returns true if caller save registers found.
   bool findRegistersToSpill() {
+    SmallSet<Register, 8> GCRegs;
+    // All GC pointer operands assigned to registers produce new value.
+    // Since they're tied to their defs, it is enough to collect def registers.
+    for (const auto &Def : MI.defs())
+      GCRegs.insert(Def.getReg());
+
     SmallSet<Register, 8> VisitedRegs;
-    SmallSet<Register, 8> GCRegs = collectGCRegs(MI);
     for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                   EndIdx = MI.getNumOperands();
          Idx < EndIdx; ++Idx) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -82,19 +82,6 @@
   return N;
 }
 
-/// Return starting index of GC operand list.
-// FIXME: need a better place for this. Put it in StackMaps?
-static unsigned getStatepointGCArgStartIdx(MachineInstr *MI) {
-  assert(MI->getOpcode() == TargetOpcode::STATEPOINT &&
-         "STATEPOINT node expected");
-  unsigned OperIdx = StatepointOpers(MI).getNumDeoptArgsIdx();
-  unsigned NumDeopts = MI->getOperand(OperIdx).getImm();
-  ++OperIdx;
-  while (NumDeopts--)
-    OperIdx = StackMaps::getNextMetaArgIdx(MI, OperIdx);
-  return OperIdx;
-}
-
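Both helpers removed above become unnecessary with the new format: the GC pointer list is now length-prefixed (StatepointOpers::getFirstGCPtrIdx replaces the hand-rolled walks), and every GC pointer kept in a virtual register is tied to one of the STATEPOINT's defs, so FixupStatepointCallerSaved can recover the GC registers from the defs alone. A minimal sketch of the latter idea; collectGCRegsFromDefs is a hypothetical name, not part of the patch:

```cpp
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// With tied-def lowering, the GC registers of a statepoint are exactly the
// registers the instruction defines.
static SmallSet<Register, 8> collectGCRegsFromDefs(const MachineInstr &MI) {
  SmallSet<Register, 8> GCRegs;
  for (const MachineOperand &Def : MI.defs())
    GCRegs.insert(Def.getReg());
  return GCRegs;
}
```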
 /// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
 /// implicit physical register output.
 void InstrEmitter::
@@ -993,14 +980,13 @@
     assert(!HasPhysRegOuts && "STATEPOINT mishandled");
     MachineInstr *MI = MIB;
     unsigned Def = 0;
-    unsigned Use = getStatepointGCArgStartIdx(MI);
-    Use = StackMaps::getNextMetaArgIdx(MI, Use); // first derived
-    assert(Use < MI->getNumOperands());
+    int First = StatepointOpers(MI).getFirstGCPtrIdx();
+    assert(First > 0 && "Statepoint has Defs but no GC ptr list");
+    unsigned Use = (unsigned)First;
     while (Def < NumDefs) {
       if (MI->getOperand(Use).isReg())
         MI->tieOperands(Def++, Use);
-      Use = StackMaps::getNextMetaArgIdx(MI, Use); // next base
-      Use = StackMaps::getNextMetaArgIdx(MI, Use); // next derived
+      Use = StackMaps::getNextMetaArgIdx(MI, Use);
     }
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -495,6 +495,7 @@
 static void
 lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
                         SmallVectorImpl<MachineMemOperand *> &MemRefs,
+                        SmallVectorImpl<SDValue> &GCPtrs,
                         DenseMap<SDValue, int> &LowerAsVReg,
                         SelectionDAGBuilder::StatepointLoweringInfo &SI,
                         SelectionDAGBuilder &Builder) {
@@ -547,21 +548,39 @@
   unsigned MaxVRegPtrs =
       std::min(MaxTiedRegs, MaxRegistersForGCPointers.getValue());
 
-  LLVM_DEBUG(dbgs() << "Desiding how to lower GC Pointers:\n");
+  LLVM_DEBUG(dbgs() << "Deciding how to lower GC Pointers:\n");
+
+  // List of unique lowered GC Pointer values.
+  SmallSetVector<SDValue, 16> LoweredGCPtrs;
+  // Map lowered GC Pointer value to the index in above vector
+  DenseMap<SDValue, unsigned> GCPtrIndexMap;
+
   unsigned CurNumVRegs = 0;
-  for (const Value *P : SI.Ptrs) {
+
+  auto processGCPtr = [&](const Value *V) {
+    SDValue PtrSD = Builder.getValue(V);
+    if (!LoweredGCPtrs.insert(PtrSD))
+      return; // skip duplicates
+    GCPtrIndexMap[PtrSD] = LoweredGCPtrs.size() - 1;
+
+    assert(!LowerAsVReg.count(PtrSD) && "must not have been seen");
     if (LowerAsVReg.size() == MaxVRegPtrs)
-      break;
-    SDValue PtrSD = Builder.getValue(P);
-    if (willLowerDirectly(PtrSD) || P->getType()->isVectorTy()) {
+      return;
+    if (willLowerDirectly(PtrSD) || V->getType()->isVectorTy()) {
       LLVM_DEBUG(dbgs() << "direct/spill "; PtrSD.dump(&Builder.DAG));
-      continue;
+      return;
     }
     LLVM_DEBUG(dbgs() << "vreg "; PtrSD.dump(&Builder.DAG));
     LowerAsVReg[PtrSD] = CurNumVRegs++;
-  }
-  LLVM_DEBUG(dbgs() << LowerAsVReg.size()
-                    << " derived pointers will go in vregs\n");
+  };
+
+  // Process derived pointers first to give them more chance to go on VReg.
+  for (const Value *V : SI.Ptrs)
+    processGCPtr(V);
+  for (const Value *V : SI.Bases)
+    processGCPtr(V);
+
+  LLVM_DEBUG(dbgs() << LowerAsVReg.size() << " pointers will go in vregs\n");
 
   auto isGCValue = [&](const Value *V) {
     auto *Ty = V->getType();
@@ -589,13 +608,16 @@
     reservePreviousStackSlotForValue(V, Builder);
   }
 
-  for (unsigned i = 0; i < SI.Bases.size(); ++i) {
-    SDValue SDV = Builder.getValue(SI.Bases[i]);
-    if (AlwaysSpillBase || !LowerAsVReg.count(SDV))
-      reservePreviousStackSlotForValue(SI.Bases[i], Builder);
-    SDV = Builder.getValue(SI.Ptrs[i]);
+  for (const Value *V : SI.Ptrs) {
+    SDValue SDV = Builder.getValue(V);
+    if (!LowerAsVReg.count(SDV))
+      reservePreviousStackSlotForValue(V, Builder);
+  }
+
+  for (const Value *V : SI.Bases) {
+    SDValue SDV = Builder.getValue(V);
     if (!LowerAsVReg.count(SDV))
-      reservePreviousStackSlotForValue(SI.Ptrs[i], Builder);
+      reservePreviousStackSlotForValue(V, Builder);
   }
 
   // First, prefix the list with the number of unique values to be
@@ -624,43 +646,51 @@
                                  Builder);
   }
 
-  // Finally, go ahead and lower all the gc arguments. There's no prefixed
-  // length for this one. After lowering, we'll have the base and pointer
-  // arrays interwoven with each (lowered) base pointer immediately followed by
-  // it's (lowered) derived pointer. i.e
-  // (base[0], ptr[0], base[1], ptr[1], ...)
-  for (unsigned i = 0; i < SI.Bases.size(); ++i) {
-    bool RequireSpillSlot;
-    SDValue Base = Builder.getValue(SI.Bases[i]);
-    RequireSpillSlot = AlwaysSpillBase || !LowerAsVReg.count(Base);
-    lowerIncomingStatepointValue(Base, RequireSpillSlot, Ops, MemRefs,
+  // Finally, go ahead and lower all the gc arguments.
+  pushStackMapConstant(Ops, Builder, LoweredGCPtrs.size());
+  for (SDValue SDV : LoweredGCPtrs)
+    lowerIncomingStatepointValue(SDV, !LowerAsVReg.count(SDV), Ops, MemRefs,
                                  Builder);
-    SDValue Derived = Builder.getValue(SI.Ptrs[i]);
-    RequireSpillSlot = !LowerAsVReg.count(Derived);
-    lowerIncomingStatepointValue(Derived, RequireSpillSlot, Ops, MemRefs,
-                                 Builder);
-  }
+
+  // Copy to out vector. LoweredGCPtrs will be empty after this point.
+  GCPtrs = LoweredGCPtrs.takeVector();
 
   // If there are any explicit spill slots passed to the statepoint, record
   // them, but otherwise do not do anything special. These are user provided
   // allocas and give control over placement to the consumer. In this case,
   // it is the contents of the slot which may get updated, not the pointer to
   // the alloca
+  SmallVector<SDValue, 4> Allocas;
   for (Value *V : SI.GCArgs) {
     SDValue Incoming = Builder.getValue(V);
     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
       // This handles allocas as arguments to the statepoint
       assert(Incoming.getValueType() == Builder.getFrameIndexTy() &&
              "Incoming value is a frame index!");
-      Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
-                                                    Builder.getFrameIndexTy()));
+      Allocas.push_back(Builder.DAG.getTargetFrameIndex(
+          FI->getIndex(), Builder.getFrameIndexTy()));
 
       auto &MF = Builder.DAG.getMachineFunction();
       auto *MMO = getMachineMemOperand(MF, *FI);
       MemRefs.push_back(MMO);
     }
   }
+  pushStackMapConstant(Ops, Builder, Allocas.size());
+  Ops.append(Allocas.begin(), Allocas.end());
+
+  // Now construct GC base/derived map;
+  pushStackMapConstant(Ops, Builder, SI.Ptrs.size());
+  SDLoc L = Builder.getCurSDLoc();
+  for (unsigned i = 0; i < SI.Ptrs.size(); ++i) {
+    SDValue Base = Builder.getValue(SI.Bases[i]);
+    assert(GCPtrIndexMap.count(Base) && "base not found in index map");
+    Ops.push_back(
+        Builder.DAG.getTargetConstant(GCPtrIndexMap[Base], L, MVT::i64));
+    SDValue Derived = Builder.getValue(SI.Ptrs[i]);
+    assert(GCPtrIndexMap.count(Derived) && "derived not found in index map");
+    Ops.push_back(
+        Builder.DAG.getTargetConstant(GCPtrIndexMap[Derived], L, MVT::i64));
+  }
 }
 
 SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
@@ -683,11 +713,16 @@
 #endif
 
   // Lower statepoint vmstate and gcstate arguments
+
+  // All lowered meta args.
   SmallVector<SDValue, 10> LoweredMetaArgs;
+  // Lowered GC pointers (subset of above).
+  SmallVector<SDValue, 16> LoweredGCArgs;
   SmallVector<MachineMemOperand *, 16> MemRefs;
   // Maps derived pointer SDValue to statepoint result of relocated pointer.
   DenseMap<SDValue, int> LowerAsVReg;
-  lowerStatepointMetaArgs(LoweredMetaArgs, MemRefs, LowerAsVReg, SI, *this);
+  lowerStatepointMetaArgs(LoweredMetaArgs, MemRefs, LoweredGCArgs, LowerAsVReg,
+                          SI, *this);
 
   // Now that we've emitted the spills, we need to update the root so that the
   // call sequence is ordered correctly.
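Taken together, the meta-operand tail emitted above now always consists of three length-prefixed sections: GC pointers, GC allocas, and the base/derived index map. As a worked example (my annotation, wrapped across lines for readability; the literal 2 is StackMaps::ConstantOp), the updated expectation for @test_base_derived in statepoint-vreg-details.ll below decodes as:

```
%2:gr64, %3:gr64 = STATEPOINT 0, 0, 0, @func,
    2, 0,                                  <- calling convention 0
    2, 0,                                  <- statepoint flags 0
    2, 0,                                  <- 0 deopt args
    2, 2, %1(tied-def 0), %0(tied-def 1),  <- 2 GC pointers; derived %1 is logical #0, base %0 is logical #1
    2, 0,                                  <- 0 GC allocas
    2, 1, 1, 0,                            <- 1 base/derived pair: (base #1, derived #0)
    csr_64, ...
```

Derived pointers are processed before bases (see processGCPtr above), which is why the derived pointer takes logical index 0 and is tied to the first def.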
@@ -802,8 +837,7 @@
   // Compute return values. Provide a glue output since we consume one as
   // input. This allows someone else to chain off us as needed.
   SmallVector<EVT, 8> NodeTys;
-  for (auto &Ptr : SI.Ptrs) {
-    SDValue SD = getValue(Ptr);
+  for (auto SD : LoweredGCArgs) {
     if (!LowerAsVReg.count(SD))
       continue;
     NodeTys.push_back(SD.getValueType());
diff --git a/llvm/lib/CodeGen/StackMaps.cpp b/llvm/lib/CodeGen/StackMaps.cpp
--- a/llvm/lib/CodeGen/StackMaps.cpp
+++ b/llvm/lib/CodeGen/StackMaps.cpp
@@ -45,6 +45,14 @@
 const char *StackMaps::WSMP = "Stack Maps: ";
 
+static uint64_t getConstMetaVal(const MachineInstr &MI, unsigned Idx) {
+  assert(MI.getOperand(Idx).isImm() &&
+         MI.getOperand(Idx).getImm() == StackMaps::ConstantOp);
+  const auto &MO = MI.getOperand(Idx + 1);
+  assert(MO.isImm());
+  return MO.getImm();
+}
+
 StackMapOpers::StackMapOpers(const MachineInstr *MI) : MI(MI) {
   assert(getVarIdx() <= MI->getNumOperands() &&
@@ -83,12 +91,56 @@
   return ScratchIdx;
 }
 
+int StatepointOpers::getFirstGCPtrIdx() {
+  unsigned NumDeoptsIdx = getNumDeoptArgsIdx();
+  unsigned NumDeoptArgs = MI->getOperand(NumDeoptsIdx).getImm();
+
+  unsigned CurIdx = NumDeoptsIdx + 1;
+  while (NumDeoptArgs--) {
+    CurIdx = StackMaps::getNextMetaArgIdx(MI, CurIdx);
+  }
+  ++CurIdx; // <StackMaps::ConstantOp>
+  unsigned NumGCPtrs = MI->getOperand(CurIdx).getImm();
+  if (NumGCPtrs == 0)
+    return -1;
+  ++CurIdx; // <num gc ptrs>
+  assert(CurIdx < MI->getNumOperands() && "Index points past operand list");
+  return (int)CurIdx;
+}
+
+unsigned StatepointOpers::getGCPointerMap(
+    SmallVectorImpl<std::pair<unsigned, unsigned>> &GCMap) {
+  int FirstGCIdx = getFirstGCPtrIdx();
+  if (FirstGCIdx == -1)
+    return 0;
+  unsigned NumGCPtr = getConstMetaVal(*MI, (unsigned)FirstGCIdx - 2);
+  unsigned CurIdx = (unsigned)FirstGCIdx;
+  while (NumGCPtr--)
+    CurIdx = StackMaps::getNextMetaArgIdx(MI, CurIdx);
+
+  unsigned NumAllocas = getConstMetaVal(*MI, CurIdx);
+  CurIdx += 2;
+  while (NumAllocas--)
+    CurIdx = StackMaps::getNextMetaArgIdx(MI, CurIdx);
+
+  assert(CurIdx < MI->getNumOperands());
+  unsigned GCMapSize = getConstMetaVal(*MI, CurIdx);
+  CurIdx += 2;
+  for (unsigned N = 0; N < GCMapSize; ++N) {
+    unsigned B = MI->getOperand(CurIdx++).getImm();
+    unsigned D = MI->getOperand(CurIdx++).getImm();
+    GCMap.push_back(std::make_pair(B, D));
+  }
+
+  return GCMapSize;
+}
+
 StackMaps::StackMaps(AsmPrinter &AP) : AP(AP) {
   if (StackMapVersion != 3)
     llvm_unreachable("Unsupported stackmap version!");
 }
 
-unsigned StackMaps::getNextMetaArgIdx(MachineInstr *MI, unsigned CurIdx) {
+unsigned StackMaps::getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx) {
   assert(CurIdx < MI->getNumOperands() && "Bad meta arg index");
   const auto &MO = MI->getOperand(CurIdx);
   if (MO.isImm()) {
@@ -317,6 +369,76 @@
   return LiveOuts;
 }
 
+// See statepoint MI format description in StatepointOpers' class comment
+// in include/llvm/CodeGen/StackMaps.h
+void StackMaps::parseStatepointOpers(const MachineInstr &MI,
+                                     MachineInstr::const_mop_iterator MOI,
+                                     MachineInstr::const_mop_iterator MOE,
+                                     LocationVec &Locations,
+                                     LiveOutVec &LiveOuts) {
+  LLVM_DEBUG(dbgs() << "record statepoint : " << MI << "\n");
+  StatepointOpers SO(&MI);
+  MOI = parseOperand(MOI, MOE, Locations, LiveOuts); // CC
+  MOI = parseOperand(MOI, MOE, Locations, LiveOuts); // Flags
+  MOI = parseOperand(MOI, MOE, Locations, LiveOuts); // Num Deopts
+
+  // Record Deopt Args.
+  unsigned NumDeoptArgs = Locations.back().Offset;
+  assert(Locations.back().Type == Location::Constant);
+  assert(NumDeoptArgs == SO.getNumDeoptArgs());
+
+  while (NumDeoptArgs--)
+    MOI = parseOperand(MOI, MOE, Locations, LiveOuts);
+
+  // Record gc base/derived pairs
+  assert(MOI->isImm() && MOI->getImm() == StackMaps::ConstantOp);
+  ++MOI;
+  assert(MOI->isImm());
+  unsigned NumGCPointers = MOI->getImm();
+  ++MOI;
+  if (NumGCPointers) {
+    // Map logical index of GC ptr to MI operand index.
+    SmallVector<unsigned, 8> GCPtrIndices;
+    unsigned GCPtrIdx = (unsigned)SO.getFirstGCPtrIdx();
+    assert((int)GCPtrIdx != -1);
+    assert(MOI - MI.operands_begin() == GCPtrIdx);
+    while (NumGCPointers--) {
+      GCPtrIndices.push_back(GCPtrIdx);
+      GCPtrIdx = StackMaps::getNextMetaArgIdx(&MI, GCPtrIdx);
+    }
+
+    SmallVector<std::pair<unsigned, unsigned>, 8> GCPairs;
+    unsigned NumGCPairs = SO.getGCPointerMap(GCPairs);
+    LLVM_DEBUG(dbgs() << "NumGCPairs = " << NumGCPairs << "\n");
+
+    auto MOB = MI.operands_begin();
+    for (auto &P : GCPairs) {
+      assert(P.first < GCPtrIndices.size() && "base pointer index not found");
+      assert(P.second < GCPtrIndices.size() &&
+             "derived pointer index not found");
+      unsigned BaseIdx = GCPtrIndices[P.first];
+      unsigned DerivedIdx = GCPtrIndices[P.second];
+      LLVM_DEBUG(dbgs() << "Base : " << BaseIdx << " Derived : " << DerivedIdx
+                        << "\n");
+      (void)parseOperand(MOB + BaseIdx, MOE, Locations, LiveOuts);
+      (void)parseOperand(MOB + DerivedIdx, MOE, Locations, LiveOuts);
+    }
+
+    MOI = MOB + GCPtrIdx;
+  }
+
+  // Record gc allocas
+  assert(MOI < MOE);
+  assert(MOI->isImm() && MOI->getImm() == StackMaps::ConstantOp);
+  ++MOI;
+  unsigned NumAllocas = MOI->getImm();
+  ++MOI;
+  while (NumAllocas--) {
+    MOI = parseOperand(MOI, MOE, Locations, LiveOuts);
+    assert(MOI < MOE);
+  }
+}
+
 void StackMaps::recordStackMapOpers(const MCSymbol &MILabel,
                                     const MachineInstr &MI, uint64_t ID,
                                     MachineInstr::const_mop_iterator MOI,
@@ -334,9 +456,11 @@
   }
 
   // Parse operands.
-  while (MOI != MOE) {
-    MOI = parseOperand(MOI, MOE, Locations, LiveOuts);
-  }
+  if (MI.getOpcode() == TargetOpcode::STATEPOINT)
+    parseStatepointOpers(MI, MOI, MOE, Locations, LiveOuts);
+  else
+    while (MOI != MOE)
+      MOI = parseOperand(MOI, MOE, Locations, LiveOuts);
 
   // Move large constants into the constant pool.
   for (auto &Loc : Locations) {
@@ -417,8 +541,6 @@
   assert(MI.getOpcode() == TargetOpcode::STATEPOINT && "expected statepoint");
 
   StatepointOpers opers(&MI);
-  // Record all the deopt and gc operands (they're contiguous and run from the
-  // initial index to the end of the operand list)
   const unsigned StartIdx = opers.getVarIdx();
   recordStackMapOpers(L, MI, opers.getID(), MI.operands_begin() + StartIdx,
                       MI.operands_end(), false);
diff --git a/llvm/test/CodeGen/X86/statepoint-stack-usage.ll b/llvm/test/CodeGen/X86/statepoint-stack-usage.ll
--- a/llvm/test/CodeGen/X86/statepoint-stack-usage.ll
+++ b/llvm/test/CodeGen/X86/statepoint-stack-usage.ll
@@ -11,9 +11,9 @@ define i32 @back_to_back_calls(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 gc "statepoint-example" {
 ; CHECK-LABEL: back_to_back_calls
 ; The exact stores don't matter, but there need to be three stack slots created
-; CHECK-DAG: movq %rdi, 16(%rsp)
-; CHECK-DAG: movq %rdx, 8(%rsp)
-; CHECK-DAG: movq %rsi, (%rsp)
+; CHECK-DAG: movq %rdi, {{[0-9]*}}(%rsp)
+; CHECK-DAG: movq %rdx, {{[0-9]*}}(%rsp)
+; CHECK-DAG: movq %rsi, {{[0-9]*}}(%rsp)
 ; There should be no more than three moves
 ; CHECK-NOT: movq
   %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...)
@llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c), "deopt" (i32 0, i32 -1, i32 0, i32 0, i32 0)] @@ -36,9 +36,9 @@ define i32 @reserve_first(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 gc "statepoint-example" { ; CHECK-LABEL: reserve_first ; The exact stores don't matter, but there need to be three stack slots created -; CHECK-DAG: movq %rdi, 16(%rsp) -; CHECK-DAG: movq %rdx, 8(%rsp) -; CHECK-DAG: movq %rsi, (%rsp) +; CHECK-DAG: movq %rdi, {{[0-9]*}}(%rsp) +; CHECK-DAG: movq %rdx, {{[0-9]*}}(%rsp) +; CHECK-DAG: movq %rsi, {{[0-9]*}}(%rsp) %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c), "deopt" (i32 0, i32 -1, i32 0, i32 0, i32 0)] %a1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0) %b1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1) @@ -89,9 +89,9 @@ ; CHECK-LABEL: back_to_back_invokes entry: ; The exact stores don't matter, but there need to be three stack slots created - ; CHECK-DAG: movq %rdi, 16(%rsp) - ; CHECK-DAG: movq %rdx, 8(%rsp) - ; CHECK-DAG: movq %rsi, (%rsp) + ; CHECK-DAG: movq %rdi, {{[0-9]*}}(%rsp) + ; CHECK-DAG: movq %rdx, {{[0-9]*}}(%rsp) + ; CHECK-DAG: movq %rsi, {{[0-9]*}}(%rsp) ; CHECK: callq %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c), "deopt" (i32 0, i32 -1, i32 0, i32 0, i32 0)] to label %normal_return unwind label %exceptional_return diff --git a/llvm/test/CodeGen/X86/statepoint-vector.ll b/llvm/test/CodeGen/X86/statepoint-vector.ll --- a/llvm/test/CodeGen/X86/statepoint-vector.ll +++ b/llvm/test/CodeGen/X86/statepoint-vector.ll @@ -32,11 +32,11 @@ ; CHECK-NEXT: movq %rdi, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] ; CHECK-NEXT: paddq %xmm0, %xmm1 -; CHECK-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movdqa %xmm1, (%rsp) +; CHECK-NEXT: movdqa %xmm0, {{[0-9]*}}(%rsp) +; CHECK-NEXT: movdqa %xmm1, {{[0-9]*}}(%rsp) ; CHECK-NEXT: callq do_safepoint ; CHECK-NEXT: .Ltmp1: -; CHECK-NEXT: movaps (%rsp), %xmm0 +; CHECK-NEXT: movaps {{[0-9]*}}(%rsp), %xmm0 ; CHECK-NEXT: addq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq @@ -163,13 +163,13 @@ ; CHECK: .short 16 ; CHECK: .short 7 ; CHECK: .short 0 -; CHECK: .long 16 +; CHECK: .long 0 ; CHECK: .byte 3 ; CHECK: .byte 0 ; CHECK: .short 16 ; CHECK: .short 7 ; CHECK: .short 0 -; CHECK: .long 0 +; CHECK: .long 16 ; CHECK: .Ltmp2-test3 ; Check for the four spill slots diff --git a/llvm/test/CodeGen/X86/statepoint-vreg-details.ll b/llvm/test/CodeGen/X86/statepoint-vreg-details.ll --- a/llvm/test/CodeGen/X86/statepoint-vreg-details.ll +++ b/llvm/test/CodeGen/X86/statepoint-vreg-details.ll @@ -24,16 +24,14 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" { ; CHECK-VREG-LABEL: name: test_relocate ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) -; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 1, 8, 
%stack.0, 0, %0(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al :: (volatile load store 8 on %stack.0) +; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 1, %0(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al ; CHECK-VREG: %2:gr8 = COPY $al ; CHECK-VREG: $rdi = COPY %1 ; CHECK-VREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG-LABEL: name: test_relocate ; CHECK-PREG: renamable $rbx = COPY $rdi -; CHECK-PREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, renamable $rbx :: (store 8 into %stack.0) -; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, killed renamable $rbx(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al :: (volatile load store 8 on %stack.0) +; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 1, killed renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al ; CHECK-PREG: renamable $bpl = COPY killed $al ; CHECK-PREG: $rdi = COPY killed renamable $rbx ; CHECK-PREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp @@ -51,10 +49,7 @@ ; CHECK-VREG: %2:gr64 = COPY $rdx ; CHECK-VREG: %1:gr64 = COPY $rsi ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, %1 :: (store 8 into %stack.1) -; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %2 :: (store 8 into %stack.0) -; CHECK-VREG: MOV64mr %stack.2, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.2) -; CHECK-VREG: %3:gr64, %4:gr64, %5:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, %2(tied-def 0), 2, 0, 2, 0, 1, 8, %stack.1, 0, %1(tied-def 1), 1, 8, %stack.2, 0, %0(tied-def 2), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0), (volatile load store 8 on %stack.1), (volatile load store 8 on %stack.2) +; CHECK-VREG: %3:gr64, %4:gr64, %5:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 4, %2(tied-def 0), 2, 0, %1(tied-def 1), %0(tied-def 2), 2, 0, 2, 4, 0, 0, 1, 1, 2, 2, 3, 3, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-VREG: %6:gr32 = MOV32r0 implicit-def dead $eflags ; CHECK-VREG: %7:gr64 = SUBREG_TO_REG 0, killed %6, %subreg.sub_32bit ; CHECK-VREG: $rdi = COPY %5 @@ -68,10 +63,7 @@ ; CHECK-PREG: renamable $r14 = COPY $rdx ; CHECK-PREG: renamable $r15 = COPY $rsi ; CHECK-PREG: renamable $rbx = COPY $rdi -; CHECK-PREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, renamable $r15 :: (store 8 into %stack.1) -; CHECK-PREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, renamable $r14 :: (store 8 into %stack.0) -; CHECK-PREG: MOV64mr %stack.2, 1, $noreg, 0, $noreg, renamable $rbx :: (store 8 into %stack.2) -; CHECK-PREG: renamable $r14, renamable $r15, renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, killed renamable $r14(tied-def 0), 2, 0, 2, 0, 1, 8, %stack.1, 0, killed renamable $r15(tied-def 1), 1, 8, %stack.2, 0, killed renamable $rbx(tied-def 2), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0), (volatile load store 8 on %stack.1), (volatile load store 8 on %stack.2) +; CHECK-PREG: renamable $r14, renamable $r15, renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 4, killed renamable $r14(tied-def 0), 2, 0, killed renamable $r15(tied-def 1), killed renamable 
$rbx(tied-def 2), 2, 0, 2, 4, 0, 0, 1, 1, 2, 2, 3, 3, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG: $rdi = COPY killed renamable $rbx ; CHECK-PREG: dead $esi = MOV32r0 implicit-def dead $eflags, implicit-def $rsi ; CHECK-PREG: $rdx = COPY killed renamable $r15 @@ -95,8 +87,7 @@ ; CHECK-VREG-LABEL: name: test_alloca ; CHECK-VREG: %0:gr64 = COPY $rdi ; CHECK-VREG: MOV64mr %stack.0.alloca, 1, $noreg, 0, $noreg, %0 :: (store 8 into %ir.alloca) -; CHECK-VREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.1) -; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 1, 8, %stack.1, 0, %0(tied-def 0), 0, %stack.0.alloca, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al :: (volatile load store 8 on %stack.1), (volatile load store 8 on %stack.0.alloca) +; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 1, %0(tied-def 0), 2, 1, 0, %stack.0.alloca, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al :: (volatile load store 8 on %stack.0.alloca) ; CHECK-VREG: %2:gr8 = COPY $al ; CHECK-VREG: %3:gr64 = MOV64rm %stack.0.alloca, 1, $noreg, 0, $noreg :: (dereferenceable load 8 from %ir.alloca) ; CHECK-VREG: $rdi = COPY %1 @@ -105,8 +96,7 @@ ; CHECK-PREG-LABEL: name: test_alloca ; CHECK-PREG: renamable $rbx = COPY $rdi ; CHECK-PREG: MOV64mr %stack.0.alloca, 1, $noreg, 0, $noreg, renamable $rbx :: (store 8 into %ir.alloca) -; CHECK-PREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, renamable $rbx :: (store 8 into %stack.1) -; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 1, 8, %stack.1, 0, killed renamable $rbx(tied-def 0), 0, %stack.0.alloca, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def dead $al :: (volatile load store 8 on %stack.1), (volatile load store 8 on %stack.0.alloca) +; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 1, killed renamable $rbx(tied-def 0), 2, 1, 0, %stack.0.alloca, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def dead $al :: (volatile load store 8 on %stack.0.alloca) ; CHECK-PREG: renamable $r14 = MOV64rm %stack.0.alloca, 1, $noreg, 0, $noreg :: (dereferenceable load 8 from %ir.alloca) ; CHECK-PREG: $rdi = COPY killed renamable $rbx ; CHECK-PREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp @@ -126,15 +116,13 @@ ; CHECK-VREG-LABEL: name: test_base_derived ; CHECK-VREG: %1:gr64 = COPY $rsi ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) -; CHECK-VREG: %2:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, %1(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +; CHECK-VREG: %2:gr64, %3:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 2, %1(tied-def 0), %0(tied-def 1), 2, 0, 2, 1, 1, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-VREG: $rdi = COPY %2 ; CHECK-VREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG-LABEL: name: test_base_derived ; CHECK-PREG: renamable $rbx = COPY $rsi -; CHECK-PREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed renamable $rdi :: (store 8 into %stack.0) -; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, killed renamable $rbx(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile 
load store 8 on %stack.0) +; CHECK-PREG: renamable $rbx, dead renamable $r14 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 2, killed renamable $rbx(tied-def 0), killed renamable $r14(tied-def 1), 2, 0, 2, 1, 1, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG: $rdi = COPY killed renamable $rbx ; CHECK-PREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp @@ -149,9 +137,8 @@ ; CHECK-VREG-LABEL: name: test_deopt_gcpointer ; CHECK-VREG: %1:gr64 = COPY $rsi ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, %1 :: (store 8 into %stack.1) ; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) -; CHECK-VREG: %2:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 1, 8, %stack.1, 0, %1(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0), (volatile load store 8 on %stack.1) +; CHECK-VREG: %2:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 1, %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) ; CHECK-VREG: $rdi = COPY %2 ; CHECK-VREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp ; CHECK-VREG: RET 0 @@ -159,7 +146,7 @@ ; CHECK-PREG-LABEL: name: test_deopt_gcpointer ; CHECK-PREG: renamable $rbx = COPY $rsi ; CHECK-PREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed renamable $rdi :: (store 8 into %stack.0) -; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 1, 8, %stack.1, 0, killed renamable $rbx(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0), (volatile load store 8 on %stack.1) +; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 1, killed renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) ; CHECK-PREG: $rdi = COPY killed renamable $rbx ; CHECK-PREG: CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp @@ -173,16 +160,14 @@ define void @test_gcrelocate_uniqueing(i32 addrspace(1)* %ptr) gc "statepoint-example" { ; CHECK-VREG-LABEL: name: test_gcrelocate_uniqueing ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) -; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, %0, 2, 4278124286, 1, 8, %stack.0, 0, %0(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, %0, 2, 4278124286, 2, 1, %0(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-VREG: $rdi = COPY %1 ; CHECK-VREG: $rsi = COPY %1 ; CHECK-VREG: CALL64pcrel32 @consume2, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG-LABEL: name: test_gcrelocate_uniqueing ; CHECK-PREG: renamable $rbx = COPY $rdi -; CHECK-PREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, renamable $rbx :: (store 8 into %stack.0) -; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, killed renamable $rbx, 2, 4278124286, 1, 8, %stack.0, 0, renamable $rbx(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile 
load store 8 on %stack.0) +; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, killed renamable $rbx, 2, 4278124286, 2, 1, renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG: $rdi = COPY renamable $rbx ; CHECK-PREG: $rsi = COPY killed renamable $rbx ; CHECK-PREG: CALL64pcrel32 @consume2, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit killed $rsi, implicit-def $rsp, implicit-def $ssp @@ -198,9 +183,8 @@ define void @test_gcptr_uniqueing(i32 addrspace(1)* %ptr) gc "statepoint-example" { ; CHECK-VREG-LABEL: name: test_gcptr_uniqueing ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) ; CHECK-VREG: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp -; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, %0, 2, 4278124286, 1, 8, %stack.0, 0, %0(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +; CHECK-VREG: %1:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, %0, 2, 4278124286, 2, 1, %0(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-VREG: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp ; CHECK-VREG: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp ; CHECK-VREG: $rdi = COPY %1 @@ -209,8 +193,7 @@ ; CHECK-PREG-LABEL: name: test_gcptr_uniqueing ; CHECK-PREG: renamable $rbx = COPY $rdi -; CHECK-PREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, renamable $rbx :: (store 8 into %stack.0) -; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, killed renamable $rbx, 2, 4278124286, 1, 8, %stack.0, 0, renamable $rbx(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +; CHECK-PREG: renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 2, killed renamable $rbx, 2, 4278124286, 2, 1, renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-PREG: $rdi = COPY renamable $rbx ; CHECK-PREG: $rsi = COPY killed renamable $rbx ; CHECK-PREG: CALL64pcrel32 @use1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit killed $rsi, implicit-def $rsp, implicit-def $ssp @@ -230,9 +213,8 @@ ; CHECK-VREG: %1:gr32 = COPY $esi ; CHECK-VREG-NEXT: %0:gr64 = COPY $rdi ; CHECK-VREG-NEXT: %4:gr8 = COPY %1.sub_8bit -; CHECK-VREG-NEXT: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) ; CHECK-VREG-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp -; CHECK-VREG-NEXT: %2:gr64 = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, %0(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al :: (volatile load store 8 on %stack.0) +; CHECK-VREG-NEXT: %2:gr64 = STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 1, %0(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $al ; CHECK-VREG-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp ; CHECK-VREG-NEXT: %5:gr8 = COPY $al ; CHECK-VREG-NEXT: %3:gr8 = COPY %5 @@ -269,8 +251,8 @@ define i1 @duplicate_reloc() gc 
"statepoint-example" { ; CHECK-VREG-LABEL: name: duplicate_reloc ; CHECK-VREG: bb.0.entry: -; CHECK-VREG: STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp -; CHECK-VREG: STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp +; CHECK-VREG: STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp +; CHECK-VREG: STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp ; CHECK-VREG: %0:gr8 = MOV8ri 1 ; CHECK-VREG: $al = COPY %0 ; CHECK-VREG: RET 0, $al @@ -294,7 +276,7 @@ ; CHECK-VREG-LABEL: name: test_vector ; CHECK-VREG: %0:vr128 = COPY $xmm0 ; CHECK-VREG: MOVAPSmr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 16 into %stack.0) -; CHECK-VREG: STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 1, 16, %stack.0, 0, 1, 16, %stack.0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 16 on %stack.0) +; CHECK-VREG: STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 1, 1, 16, %stack.0, 0, 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 16 on %stack.0) ; CHECK-VREG: %1:vr128 = MOVAPSrm %stack.0, 1, $noreg, 0, $noreg :: (load 16 from %stack.0) ; CHECK-VREG: $xmm0 = COPY %1 ; CHECK-VREG: RET 0, $xmm0 @@ -314,13 +296,9 @@ ; CHECK-VREG: %2:gr64 = COPY $rdx ; CHECK-VREG: %1:gr64 = COPY $rsi ; CHECK-VREG: %0:gr64 = COPY $rdi -; CHECK-VREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, %3 :: (store 8 into %stack.1) -; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %4 :: (store 8 into %stack.0) -; CHECK-VREG: MOV64mr %stack.2, 1, $noreg, 0, $noreg, %2 :: (store 8 into %stack.2) -; CHECK-VREG: MOV64mr %stack.3, 1, $noreg, 0, $noreg, %1 :: (store 8 into %stack.3) -; CHECK-VREG: MOV64mr %stack.4, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.4) -; CHECK-VREG: %5:gr64, %6:gr64, %7:gr64, %8:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, %4(tied-def 0), 1, 8, %stack.1, 0, %3(tied-def 1), 1, 8, %stack.2, 0, %2(tied-def 2), 1, 8, %stack.3, 0, %1(tied-def 3), 1, 8, %stack.4, 0, 1, 8, %stack.4, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0), (volatile load store 8 on %stack.1), (volatile load store 8 on %stack.2), (volatile load store 8 on %stack.3), (volatile load store 8 on %stack.4) -; CHECK-VREG: %9:gr64 = MOV64rm %stack.4, 1, $noreg, 0, $noreg :: (load 8 from %stack.4) +; CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) +; CHECK-VREG: %5:gr64, %6:gr64, %7:gr64, %8:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 0, 2, 5, %4(tied-def 0), %3(tied-def 1), %2(tied-def 2), %1(tied-def 3), 1, 8, %stack.0, 0, 2, 0, 2, 5, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +; CHECK-VREG: %9:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0) ; CHECK-VREG: $rdi = COPY %9 ; CHECK-VREG: $rsi = COPY %8 ; CHECK-VREG: $rdx = COPY %7 @@ -345,19 +323,19 @@ define void @test_duplicate_ir_values() gc "statepoint-example" personality i32* ()* @fake_personality_function{ ;CHECK-VREG-LABEL: name: test_duplicate_ir_values ;CHECK-VREG: bb.0.entry: -;CHECK-VREG: %0:gr64 = STATEPOINT 1, 16, 5, %8, $edi, $rsi, $edx, $ecx, $r8d, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, %1(tied-def 0), csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $eax :: (volatile load store 8 on %stack.0) 
+;CHECK-VREG: %0:gr64 = STATEPOINT 1, 16, 5, %8, $edi, $rsi, $edx, $ecx, $r8d, 2, 0, 2, 0, 2, 0, 2, 1, killed %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def $eax ;CHECK-VREG: JMP_1 %bb.1 ;CHECK-VREG: bb.1.normal_continue: ;CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) ;CHECK-VREG: %13:gr32 = MOV32ri 10 ;CHECK-VREG: $edi = COPY %13 -;CHECK-VREG: STATEPOINT 2882400000, 0, 1, @__llvm_deoptimize, $edi, 2, 0, 2, 2, 2, 2, 1, 8, %stack.0, 0, 1, 8, %stack.0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +;CHECK-VREG: STATEPOINT 2882400000, 0, 1, @__llvm_deoptimize, $edi, 2, 0, 2, 2, 2, 2, 1, 8, %stack.0, 0, 1, 8, %stack.0, 0, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) ;CHECK-VREG: bb.2.exceptional_return (landing-pad): ;CHECK-VREG: EH_LABEL ;CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0) ;CHECK-VREG: %12:gr32 = MOV32ri -271 ;CHECK-VREG: $edi = COPY %12 -;CHECK-VREG: STATEPOINT 2882400000, 0, 1, @__llvm_deoptimize, $edi, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) +;CHECK-VREG: STATEPOINT 2882400000, 0, 1, @__llvm_deoptimize, $edi, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0) entry: %local.0 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8 @@ -388,13 +366,11 @@ ;CHECK-VREG: %0:gr64 = COPY $rdi ;CHECK-VREG: TEST32rr %2, %2, implicit-def $eflags ;CHECK-VREG: %5:gr64 = CMOV64rr %1, %0, 4, implicit $eflags -;CHECK-VREG: MOV64mr %stack.1, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.1) -;CHECK-VREG: MOV64mr %stack.0, 1, $noreg, 0, $noreg, %1 :: (store 8 into %stack.0) ;CHECK-VREG: %6:gr32 = MOV32r0 implicit-def dead $eflags ;CHECK-VREG: %7:gr64 = SUBREG_TO_REG 0, killed %6, %subreg.sub_32bit ;CHECK-VREG: $rdi = COPY %7 ;CHECK-VREG: $rsi = COPY %5 -;CHECK-VREG: %3:gr64, %4:gr64 = STATEPOINT 10, 0, 2, @bar, $rdi, $rsi, 2, 0, 2, 0, 2, 0, 1, 8, %stack.0, 0, %1(tied-def 0), 1, 8, %stack.1, 0, %0(tied-def 1), csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0), (volatile load store 8 on %stack.1) +;CHECK-VREG: %3:gr64, %4:gr64 = STATEPOINT 10, 0, 2, @bar, $rdi, $rsi, 2, 0, 2, 0, 2, 0, 2, 2, %1(tied-def 0), %0(tied-def 1), 2, 0, 2, 2, 0, 0, 1, 1, csr_64, implicit-def $rsp, implicit-def $ssp ;CHECK-VREG: TEST32rr %2, %2, implicit-def $eflags ;CHECK-VREG: %8:gr64 = CMOV64rr %3, %4, 4, implicit $eflags ;CHECK-VREG: $rax = COPY %8 diff --git a/llvm/test/CodeGen/X86/statepoint-vreg.ll b/llvm/test/CodeGen/X86/statepoint-vreg.ll --- a/llvm/test/CodeGen/X86/statepoint-vreg.ll +++ b/llvm/test/CodeGen/X86/statepoint-vreg.ll @@ -30,7 +30,6 @@ ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %rbp, -16 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rdi, (%rsp) ; CHECK-NEXT: callq return_i1 ; CHECK-NEXT: .Ltmp0: ; CHECK-NEXT: movl %eax, %ebp @@ -62,17 +61,12 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: subq $32, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset %rbx, -32 ; CHECK-NEXT: .cfi_offset %r14, -24 ; CHECK-NEXT: .cfi_offset %r15, -16 ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movq %rsi, %r15 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rsi, {{[0-9]+}}(%rsp) -; 
CHECK-NEXT: movq %rdx, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq func ; CHECK-NEXT: .Ltmp1: ; CHECK-NEXT: movq %rbx, %rdi @@ -81,8 +75,6 @@ ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: movq %r14, %r8 ; CHECK-NEXT: callq consume5 -; CHECK-NEXT: addq $32, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %r14 @@ -109,20 +101,19 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 -; CHECK-NEXT: subq $24, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) +; CHECK-NEXT: movq %rdi, {{[0-9]*}}(%rsp) ; CHECK-NEXT: callq return_i1 ; CHECK-NEXT: .Ltmp2: -; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; CHECK-NEXT: movq {{[0-9]*}}(%rsp), %r14 ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: callq consume ; CHECK-NEXT: movq %r14, %rax -; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: addq $8, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 @@ -143,20 +134,25 @@ define void @test_base_derived(i32 addrspace(1)* %base, i32 addrspace(1)* %derived) gc "statepoint-example" { ; CHECK-LABEL: test_base_derived: ; CHECK: # %bb.0: -; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: subq $16, %rsp +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 24 +; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset %rbx, -16 -; CHECK-NEXT: movq %rsi, %rbx -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) +; CHECK-NEXT: .cfi_offset %rbx, -24 +; CHECK-NEXT: .cfi_offset %r14, -16 +; CHECK-NEXT: movq %rsi, %rbx +; CHECK-NEXT: movq %rdi, %r14 ; CHECK-NEXT: callq func ; CHECK-NEXT: .Ltmp3: ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: callq consume -; CHECK-NEXT: addq $16, %rsp +; CHECK-NEXT: addq $8, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 24 +; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: popq %rbx +; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) 
@llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %base, i32 addrspace(1)* %derived)] @@ -175,7 +171,6 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset %rbx, -16 ; CHECK-NEXT: movq %rsi, %rbx -; CHECK-NEXT: movq %rsi, (%rsp) ; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq func ; CHECK-NEXT: .Ltmp4: @@ -198,18 +193,13 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: subq $16, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset %rbx, -16 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq func ; CHECK-NEXT: .Ltmp5: ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: movq %rbx, %rsi ; CHECK-NEXT: callq consume2 -; CHECK-NEXT: addq $16, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq @@ -226,18 +216,13 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: subq $16, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset %rbx, -16 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq func ; CHECK-NEXT: .Ltmp6: ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: movq %rbx, %rsi ; CHECK-NEXT: callq use1 -; CHECK-NEXT: addq $16, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq @@ -261,14 +246,11 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: subq $16, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -32 ; CHECK-NEXT: .cfi_offset %r14, -24 ; CHECK-NEXT: .cfi_offset %rbp, -16 ; CHECK-NEXT: movl %esi, %ebp ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq return_i1 ; CHECK-NEXT: .Ltmp7: ; CHECK-NEXT: testb $1, %bpl @@ -282,8 +264,6 @@ ; CHECK-NEXT: .LBB7_2: # %right ; CHECK-NEXT: movb $1, %al ; CHECK-NEXT: .LBB7_3: # %right -; CHECK-NEXT: addq $16, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %r14 @@ -365,8 +345,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 40 -; CHECK-NEXT: subq $40, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 80 +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -40 ; CHECK-NEXT: .cfi_offset %r12, -32 ; CHECK-NEXT: .cfi_offset %r14, -24 @@ -375,10 +355,6 @@ ; CHECK-NEXT: movq %rcx, %r15 ; CHECK-NEXT: movq %rdx, %r12 ; CHECK-NEXT: movq %rsi, %rbx -; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %r8, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rdx, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rsi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq %rdi, (%rsp) ; CHECK-NEXT: callq func ; CHECK-NEXT: .Ltmp11: @@ -388,7 +364,7 @@ ; CHECK-NEXT: movq %r15, %rcx ; CHECK-NEXT: movq %r14, %r8 ; CHECK-NEXT: callq consume5 -; CHECK-NEXT: addq $40, %rsp +; CHECK-NEXT: addq $8, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 40 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 32 @@ -417,14 +393,12 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 -; CHECK-NEXT: subq $24, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset %rbx, -24 
; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movq %rsi, %r14 ; CHECK-NEXT: movq %rdi, %rbx -; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rsi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: .Ltmp12: ; CHECK-NEXT: callq some_call ; CHECK-NEXT: .Ltmp15: @@ -432,7 +406,7 @@ ; CHECK-NEXT: # %bb.1: # %normal_return ; CHECK-NEXT: movq %rbx, %rax ; CHECK-NEXT: .LBB11_2: # %normal_return -; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: addq $8, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 @@ -440,7 +414,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB11_3: # %exceptional_return -; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .Ltmp14: ; CHECK-NEXT: movq %r14, %rax ; CHECK-NEXT: jmp .LBB11_2 @@ -476,8 +450,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 40 -; CHECK-NEXT: subq $24, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -40 ; CHECK-NEXT: .cfi_offset %r14, -32 ; CHECK-NEXT: .cfi_offset %r15, -24 @@ -488,8 +462,6 @@ ; CHECK-NEXT: testb $1, %r14b ; CHECK-NEXT: je .LBB12_2 ; CHECK-NEXT: # %bb.1: # %left -; CHECK-NEXT: movq %rbp, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rbx, {{[0-9]+}}(%rsp) ; CHECK-NEXT: .Ltmp19: ; CHECK-NEXT: movq %rbp, %rdi ; CHECK-NEXT: callq some_call @@ -498,8 +470,6 @@ ; CHECK-NEXT: jmp .LBB12_4 ; CHECK-NEXT: .LBB12_2: # %right ; CHECK-NEXT: movq %rcx, %r15 -; CHECK-NEXT: movq %rbx, {{[0-9]+}}(%rsp) -; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp) ; CHECK-NEXT: .Ltmp16: ; CHECK-NEXT: movq %rbp, %rdi ; CHECK-NEXT: callq some_call @@ -513,7 +483,7 @@ ; CHECK-NEXT: .LBB12_6: # %exceptional_return.left ; CHECK-NEXT: movq %rbp, %rax ; CHECK-NEXT: .LBB12_7: # %exceptional_return.left -; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: addq $8, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 40 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 32 @@ -525,7 +495,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB12_8: # %exceptional_return.right -; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .Ltmp18: ; CHECK-NEXT: movq %rbx, %rax ; CHECK-NEXT: jmp .LBB12_7 @@ -603,20 +573,18 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 -; CHECK-NEXT: subq $40, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: subq $24, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %rbp, -16 ; CHECK-NEXT: movq %rsi, %rbx ; CHECK-NEXT: movl %edi, %ebp ; CHECK-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill -; CHECK-NEXT: movq %rsi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq consume3 ; CHECK-NEXT: .Ltmp25: ; CHECK-NEXT: xorps %xmm0, %xmm0 ; CHECK-NEXT: cvtsi2sd %ebp, %xmm0 ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-NEXT: movq %rbx, {{[0-9]+}}(%rsp) ; CHECK-NEXT: nopl 8(%rax,%rax) ; CHECK-NEXT: .Ltmp26: ; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload @@ -625,7 +593,6 @@ ; CHECK-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload ; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss %xmm0, (%rsp) -; CHECK-NEXT: movq %rbx, {{[0-9]+}}(%rsp) ; CHECK-NEXT: nopl 8(%rax,%rax) ; CHECK-NEXT: .Ltmp27: ; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload @@ -634,7 +601,6 @@ ; CHECK-NEXT: movss 
{{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload ; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss %xmm0, (%rsp) -; CHECK-NEXT: movq %rbx, {{[0-9]+}}(%rsp) ; CHECK-NEXT: nopl 8(%rax,%rax) ; CHECK-NEXT: .Ltmp28: ; CHECK-NEXT: xorl %eax, %eax @@ -650,7 +616,7 @@ ; CHECK-NEXT: movss %xmm0, (%rsp) ; CHECK-NEXT: nopl 8(%rax,%rax) ; CHECK-NEXT: .Ltmp29: -; CHECK-NEXT: addq $40, %rsp +; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 diff --git a/llvm/test/CodeGen/X86/statepoint-vreg.mir b/llvm/test/CodeGen/X86/statepoint-vreg.mir --- a/llvm/test/CodeGen/X86/statepoint-vreg.mir +++ b/llvm/test/CodeGen/X86/statepoint-vreg.mir @@ -146,7 +146,7 @@ %1:gr64 = COPY $rsi %0:gr64 = COPY $rdi ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %2:gr64, %3:gr64 = STATEPOINT 2882400000, 0, 0, @bar, 2, 0, 2, 0, 2, 1, 2, 0, %1, %1(tied-def 0), %0, %0(tied-def 1), csr_64, implicit-def $rsp, implicit-def $ssp + %2:gr64, %3:gr64 = STATEPOINT 2882400000, 0, 0, @bar, 2, 0, 2, 0, 2, 1, 2, 0, 2, 2, %1(tied-def 0), %0(tied-def 1), 2, 0, 2, 2, 0, 0, 1, 1, csr_64, implicit-def $rsp, implicit-def $ssp ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp %4:gr32 = MOV32rm killed %3, 1, $noreg, 0, $noreg :: (load 4 from %ir.rel1, addrspace 1) %5:gr32 = ADD32rm %4, killed %2, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.rel2, addrspace 1)
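For reference, the updated STATEPOINT in statepoint-vreg.mir above breaks down as follows under the new encoding (my annotation, wrapped across lines; the literal 2 is StackMaps::ConstantOp):

```
%2:gr64, %3:gr64 = STATEPOINT 2882400000, 0, 0, @bar,
    2, 0,                                  <- calling convention 0
    2, 0,                                  <- statepoint flags 0
    2, 1, 2, 0,                            <- 1 deopt arg: the constant 0
    2, 2, %1(tied-def 0), %0(tied-def 1),  <- 2 GC pointers, each tied to a def
    2, 0,                                  <- 0 GC allocas
    2, 2, 0, 0, 1, 1,                      <- 2 base/derived pairs: each pointer is its own base
    csr_64, ...
```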