diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h --- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h +++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h @@ -123,11 +123,6 @@ return StackRealignable; } - /// Return the skew that has to be applied to stack alignment under - /// certain conditions (e.g. stack was adjusted before function \p MF - /// was called). - virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const; - /// This method returns whether or not it is safe for an object with the /// given stack id to be bundled into the local area. virtual bool isStackIdSafeForLocalArea(unsigned StackId) const { diff --git a/llvm/include/llvm/IR/CallingConv.h b/llvm/include/llvm/IR/CallingConv.h --- a/llvm/include/llvm/IR/CallingConv.h +++ b/llvm/include/llvm/IR/CallingConv.h @@ -172,13 +172,8 @@ /// in SSE registers. X86_VectorCall = 80, - /// Calling convention used by HipHop Virtual Machine (HHVM) to - /// perform calls to and from translation cache, and for calling PHP - /// functions. - /// HHVM calling convention supports tail/sibling call elimination. + /// Placeholders for HHVM calling conventions (deprecated, removed). HHVM = 81, - - /// HHVM calling convention for invoking C/C++ helpers. HHVM_C = 82, /// X86_INTR - x86 hardware interrupt context. Callee may take one or two diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp --- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp +++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp @@ -657,7 +657,7 @@ /// AdjustStackOffset - Helper function used to adjust the stack frame offset. static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx, bool StackGrowsDown, int64_t &Offset, - Align &MaxAlign, unsigned Skew) { + Align &MaxAlign) { // If the stack grows down, add the object size to find the lowest address. 
if (StackGrowsDown) Offset += MFI.getObjectSize(FrameIdx); @@ -669,7 +669,7 @@ MaxAlign = std::max(MaxAlign, Alignment); // Adjust to alignment boundary. - Offset = alignTo(Offset, Alignment, Skew); + Offset = alignTo(Offset, Alignment); if (StackGrowsDown) { LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset @@ -793,11 +793,10 @@ static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs, SmallSet<int, 16> &ProtectedObjs, MachineFrameInfo &MFI, bool StackGrowsDown, - int64_t &Offset, Align &MaxAlign, - unsigned Skew) { + int64_t &Offset, Align &MaxAlign) { for (int i : UnassignedObjs) { - AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew); + AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign); ProtectedObjs.insert(i); } } @@ -823,9 +822,6 @@ && "Local area offset should be in direction of stack growth"); int64_t Offset = LocalAreaOffset; - // Skew to be applied to alignment. - unsigned Skew = TFI.getStackAlignmentSkew(MF); - #ifdef EXPENSIVE_CHECKS for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) if (!MFI.isDeadObjectIndex(i) && @@ -873,8 +869,7 @@ if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex)) continue; - AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign, - Skew); + AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign); } } @@ -895,7 +890,7 @@ SmallVector<int, 2> SFIs; RS->getScavengingFrameIndices(SFIs); for (int SFI : SFIs) - AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew); + AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign); } // FIXME: Once this is working, then enable flag will change to a target @@ -906,7 +901,7 @@ Align Alignment = MFI.getLocalFrameMaxAlign(); // Adjust to alignment boundary. 
- Offset = alignTo(Offset, Alignment, Skew); + Offset = alignTo(Offset, Alignment); LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n"); @@ -952,8 +947,8 @@ "Stack protector on non-default stack expected to not be " "pre-allocated by LocalStackSlotPass."); } else if (!MFI.getUseLocalStackAllocationBlock()) { - AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign, - Skew); + AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, + MaxAlign); } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) { llvm_unreachable( "Stack protector not pre-allocated by LocalStackSlotPass."); @@ -1001,11 +996,11 @@ "LocalStackSlotPass."); AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown, - Offset, MaxAlign, Skew); + Offset, MaxAlign); AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown, - Offset, MaxAlign, Skew); + Offset, MaxAlign); AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown, - Offset, MaxAlign, Skew); + Offset, MaxAlign); } SmallVector<int, 8> ObjectsToAllocate; @@ -1036,7 +1031,7 @@ // Allocate the EH registration node first if one is present. if (EHRegNodeFrameIndex != std::numeric_limits<int>::max()) AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset, - MaxAlign, Skew); + MaxAlign); // Give the targets a chance to order the objects the way they like it. if (MF.getTarget().getOptLevel() != CodeGenOpt::None && @@ -1058,7 +1053,7 @@ for (auto &Object : ObjectsToAllocate) if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign, StackBytesFree)) - AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew); + AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign); // Make sure the special register scavenging spill slot is closest to the // stack pointer. 
@@ -1066,7 +1061,7 @@ SmallVector<int, 2> SFIs; RS->getScavengingFrameIndices(SFIs); for (int SFI : SFIs) - AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew); + AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign); } if (!TFI.targetHandlesStackFrameRounding()) { @@ -1092,7 +1087,7 @@ // SP not FP. Align to MaxAlign so this works. StackAlign = std::max(StackAlign, MaxAlign); int64_t OffsetBeforeAlignment = Offset; - Offset = alignTo(Offset, StackAlign, Skew); + Offset = alignTo(Offset, StackAlign); // If we have increased the offset to fulfill the alignment constrants, // then the scavenging spill slots may become harder to reach from the diff --git a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp --- a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp +++ b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp @@ -17,7 +17,6 @@ #include "llvm/CodeGen/TargetFrameLowering.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/IR/Attributes.h" -#include "llvm/IR/CallingConv.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstrTypes.h" #include "llvm/MC/MCAsmInfo.h" @@ -131,16 +130,6 @@ } } -unsigned TargetFrameLowering::getStackAlignmentSkew( - const MachineFunction &MF) const { - // When HHVM function is called, the stack is skewed as the return address - // is removed from the stack before we enter the function. - if (LLVM_UNLIKELY(MF.getFunction().getCallingConv() == CallingConv::HHVM)) - return MF.getTarget().getAllocaPointerSize(); - - return 0; -} - bool TargetFrameLowering::allocateScavengingFrameIndexesNearIncomingSP( const MachineFunction &MF) const { if (!hasFP(MF)) diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td --- a/llvm/lib/Target/X86/X86CallingConv.td +++ b/llvm/lib/Target/X86/X86CallingConv.td @@ -430,16 +430,6 @@ CCCustom<"CC_X86_AnyReg_Error"> ]>; -// X86-64 HHVM return-value convention. 
-def RetCC_X86_64_HHVM: CallingConv<[ - // Promote all types to i64 - CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, - - // Return: could return in any GP register save RSP and R12. - CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9, - RAX, R10, R11, R13, R14, R15]>> -]>; - defm X86_32_RegCall : X86_RegCall_base<RC_X86_32_RegCall>; @@ -483,9 +473,6 @@ // Handle Vectorcall CC CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>, - // Handle HHVM calls. - CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>, - CCIfCC<"CallingConv::X86_RegCall", CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_RegCall>>>, @@ -603,23 +590,6 @@ CCAssignToStack<64, 64>> ]>; -// Calling convention for X86-64 HHVM. -def CC_X86_64_HHVM : CallingConv<[ - // Use all/any GP registers for args, except RSP. - CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15, - RDI, RSI, RDX, RCX, R8, R9, - RAX, R10, R11, R13, R14]>> -]>; - -// Calling convention for helper functions in HHVM. -def CC_X86_64_HHVM_C : CallingConv<[ - // Pass the first argument in RBP. - CCIfType<[i64], CCAssignToReg<[RBP]>>, - - // Otherwise it's the same as the regular C calling convention. - CCDelegateTo<CC_X86_64_C> -]>; - // Calling convention used on Win64 def CC_X86_Win64_C : CallingConv<[ // FIXME: Handle varargs. @@ -1097,8 +1067,6 @@ CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>, CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>, CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>, - CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>, - CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>, CCIfCC<"CallingConv::X86_RegCall", CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>, CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>, @@ -1210,9 +1178,6 @@ (sequence "ZMM%u", 16, 31), K4, K5, K6, K7)>; -// Only R12 is preserved for PHP calls in HHVM. 
-def CSR_64_HHVM : CalleeSavedRegs<(add R12)>; - // Register calling convention preserves few GPR and XMM8-15 def CSR_32_RegCall_NoSSE : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>; def CSR_32_RegCall : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE, diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -3419,8 +3419,7 @@ static bool canGuaranteeTCO(CallingConv::ID CC) { return (CC == CallingConv::Fast || CC == CallingConv::GHC || CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE || - CC == CallingConv::HHVM || CC == CallingConv::Tail || - CC == CallingConv::SwiftTail); + CC == CallingConv::Tail || CC == CallingConv::SwiftTail); } /// Return true if we might ever do TCO for calls with this calling convention. diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp --- a/llvm/lib/Target/X86/X86RegisterInfo.cpp +++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp @@ -329,8 +329,6 @@ return CSR_64_Intel_OCL_BI_SaveList; break; } - case CallingConv::HHVM: - return CSR_64_HHVM_SaveList; case CallingConv::X86_RegCall: if (Is64Bit) { if (IsWin64) { @@ -451,8 +449,6 @@ return CSR_64_Intel_OCL_BI_RegMask; break; } - case CallingConv::HHVM: - return CSR_64_HHVM_RegMask; case CallingConv::X86_RegCall: if (Is64Bit) { if (IsWin64) { diff --git a/llvm/test/CodeGen/X86/hhvm-cc.ll b/llvm/test/CodeGen/X86/hhvm-cc.ll deleted file mode 100644 --- a/llvm/test/CodeGen/X86/hhvm-cc.ll +++ /dev/null @@ -1,241 +0,0 @@ -; RUN: llc < %s | FileCheck %s - -target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - -declare hhvmcc i64 @bar(i64, i64, i64) nounwind - -; Simply check we can modify %rbx and %rbp before returning via call to bar. 
-define hhvmcc i64 @foo(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: foo: -; CHECK-DAG: movl $1, %ebx -; CHECK-DAG: movl $3, %ebp -; CHECK: jmp bar - %ret = musttail call hhvmcc i64 @bar(i64 1, i64 %b, i64 3) - ret i64 %ret -} - -; Check that we can read and modify %rbx returned from PHP function. -define hhvmcc i64 @mod_return(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: mod_return: -; CHECK-NEXT: {{^#.*}} -; CHECK-NEXT: callq bar -; CHECK-NEXT: incq %rbx - %tmp = call hhvmcc i64 @bar(i64 %a, i64 %b, i64 %c) - %retval = add i64 %tmp, 1 - ret i64 %retval -} - -%rettype = type { i64, i64, i64, i64, i64, i64, i64, - i64, i64, i64, i64, i64, i64, i64 -} - -; Check that we can return up to 14 64-bit args in registers. -define hhvmcc %rettype @return_all(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: return_all: -; CHECK-DAG: movl $1, %ebx -; CHECK-DAG: movl $2, %ebp -; CHECK-DAG: movl $3, %edi -; CHECK-DAG: movl $4, %esi -; CHECK-DAG: movl $5, %edx -; CHECK-DAG: movl $6, %ecx -; CHECK-DAG: movl $7, %r8 -; CHECK-DAG: movl $8, %r9 -; CHECK-DAG: movl $9, %eax -; CHECK-DAG: movl $10, %r10 -; CHECK-DAG: movl $11, %r11 -; CHECK-DAG: movl $12, %r13 -; CHECK-DAG: movl $13, %r14 -; CHECK-DAG: movl $14, %r15 -; CHECK: retq - %r1 = insertvalue %rettype zeroinitializer, i64 1, 0 - %r2 = insertvalue %rettype %r1, i64 2, 1 - %r3 = insertvalue %rettype %r2, i64 3, 2 - %r4 = insertvalue %rettype %r3, i64 4, 3 - %r5 = insertvalue %rettype %r4, i64 5, 4 - %r6 = insertvalue %rettype %r5, i64 6, 5 - %r7 = insertvalue %rettype %r6, i64 7, 6 - %r8 = insertvalue %rettype %r7, i64 8, 7 - %r9 = insertvalue %rettype %r8, i64 9, 8 - %r10 = insertvalue %rettype %r9, i64 10, 9 - %r11 = insertvalue %rettype %r10, i64 11, 10 - %r12 = insertvalue %rettype %r11, i64 12, 11 - %r13 = insertvalue %rettype %r12, i64 13, 12 - %r14 = insertvalue %rettype %r13, i64 14, 13 - ret %rettype %r14 -} - -declare hhvmcc void @return_all_tc(i64, i64, i64, i64, i64, i64, i64, 
i64, - i64, i64, i64, i64, i64, i64, i64) - -; Check that we can return up to 14 64-bit args in registers via tail call. -define hhvmcc void @test_return_all_tc(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_return_all_tc: -; CHECK-NEXT: {{^#.*}} -; CHECK-DAG: movl $1, %ebx -; CHECK-DAG: movl $3, %ebp -; CHECK-DAG: movl $4, %r15 -; CHECK-DAG: movl $5, %edi -; CHECK-DAG: movl $6, %esi -; CHECK-DAG: movl $7, %edx -; CHECK-DAG: movl $8, %ecx -; CHECK-DAG: movl $9, %r8 -; CHECK-DAG: movl $10, %r9 -; CHECK-DAG: movl $11, %eax -; CHECK-DAG: movl $12, %r10 -; CHECK-DAG: movl $13, %r11 -; CHECK-DAG: movl $14, %r13 -; CHECK-DAG: movl $15, %r14 -; CHECK: jmp return_all_tc - tail call hhvmcc void @return_all_tc( - i64 1, i64 %b, i64 3, i64 4, i64 5, i64 6, i64 7, - i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15) - ret void -} - -declare hhvmcc {i64, i64} @php_short(i64, i64, i64, i64) - -define hhvmcc i64 @test_php_short(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_php_short: -; CHECK-NEXT: {{^#.*}} -; CHECK-NEXT: movl $42, %r15 -; CHECK-NEXT: callq php_short -; CHECK-NEXT: leaq (%rbp,%r12), %rbx -; CHECK-NEXT: retq - %pair = call hhvmcc {i64, i64} @php_short(i64 %a, i64 %b, i64 %c, i64 42) - %fp = extractvalue {i64, i64} %pair, 1 - %rv = add i64 %fp, %b - ret i64 %rv -} - -declare hhvmcc %rettype @php_all(i64, i64, i64, i64, i64, i64, i64, - i64, i64, i64, i64, i64, i64, i64, i64) - -; Check that we can pass 15 arguments in registers. -; Also check that %r12 (2nd arg) is not spilled. 
-define hhvmcc i64 @test_php_all(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_php_all: -; CHECK-NEXT: {{^#.*}} -; CHECK-NOT: sub -; CHECK-NOT: sub -; CHECK-DAG: movl $1, %ebx -; CHECK-DAG: movl $3, %ebp -; CHECK-DAG: movl $4, %r15 -; CHECK-DAG: movl $5, %edi -; CHECK-DAG: movl $6, %esi -; CHECK-DAG: movl $7, %edx -; CHECK-DAG: movl $8, %ecx -; CHECK-DAG: movl $9, %r8 -; CHECK-DAG: movl $10, %r9 -; CHECK-DAG: movl $11, %eax -; CHECK-DAG: movl $12, %r10 -; CHECK-DAG: movl $13, %r11 -; CHECK-DAG: movl $14, %r13 -; CHECK-DAG: movl $15, %r14 -; CHECK: callq php_all - %pair = call hhvmcc %rettype @php_all( - i64 1, i64 %b, i64 3, i64 4, i64 5, i64 6, i64 7, - i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15) - %fp = extractvalue %rettype %pair, 1 - %rv = add i64 %fp, %b - ret i64 %rv -} - -declare hhvmcc void @svcreq(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, - i64, i64) - -define hhvmcc void @test_svcreq(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_svcreq: -; CHECK-DAG: movl $42, %r10 -; CHECK-DAG: movl $1, %edi -; CHECK-DAG: movl $2, %esi -; CHECK-DAG: movl $3, %edx -; CHECK-DAG: movl $4, %ecx -; CHECK-DAG: movl $5, %r8 -; CHECK-DAG: movl $6, %r9 -; CHECK: jmp svcreq - tail call hhvmcc void @svcreq(i64 %a, i64 %b, i64 %c, i64 undef, i64 1, - i64 2, i64 3, i64 4, i64 5, i64 6, i64 undef, - i64 42) - ret void -} - -declare hhvm_ccc void @helper_short(i64, i64, i64, i64, i64, i64, i64) - -; Pass all arguments in registers and check that we don't adjust stack -; for the call. 
-define hhvmcc void @test_helper_short(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_helper_short: -; CHECK-NOT: push -; CHECK-NOT: sub -; CHECK-DAG: movl $1, %edi -; CHECK-DAG: movl $2, %esi -; CHECK-DAG: movl $3, %edx -; CHECK-DAG: movl $4, %ecx -; CHECK-DAG: movl $5, %r8 -; CHECK-DAG: movl $6, %r9 -; CHECK: callq helper_short - call hhvm_ccc void @helper_short(i64 %c, i64 1, i64 2, i64 3, i64 4, - i64 5, i64 6) - ret void -} - -declare hhvm_ccc void @helper(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64) - -define hhvmcc void @test_helper(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_helper: -; CHECK-DAG: movl $1, %edi -; CHECK-DAG: movl $2, %esi -; CHECK-DAG: movl $3, %edx -; CHECK-DAG: movl $4, %ecx -; CHECK-DAG: movl $5, %r8 -; CHECK-DAG: movl $6, %r9 -; CHECK: callq helper - call hhvm_ccc void @helper(i64 %c, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, - i64 7, i64 8, i64 9) - ret void -} - -; When we enter function with HHVM calling convention, the stack is aligned -; at 16 bytes. This means we align objects on the stack differently and -; adjust the stack differently for calls. -declare hhvm_ccc void @stack_helper(i64, i64, i64) -declare hhvm_ccc void @stack_helper2(<2 x double>, i64) - -define hhvmcc void @test_stack_helper(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_stack_helper: -; CHECK-NOT: push -; CHECK: subq $32, %rsp -; CHECK: movaps 16(%rsp), %xmm0 -; CHECK: callq stack_helper2 - %t1 = alloca <2 x double>, align 16 - %t2 = alloca i64, align 8 - %t3 = alloca i64, align 8 - %load3 = load i64, i64 *%t3 - call hhvm_ccc void @stack_helper(i64 %c, i64 %load3, i64 42) - %load = load <2 x double>, <2 x double> *%t1 - %load2 = load i64, i64 *%t2 - call hhvm_ccc void @stack_helper2(<2 x double> %load, i64 %load2) - ret void -} - -; Check that we are not adjusting the stack before calling the helper. 
-define hhvmcc void @test_stack_helper2(i64 %a, i64 %b, i64 %c) nounwind { -entry: -; CHECK-LABEL: test_stack_helper2: -; CHECK-NOT: push -; CHECK-NOT: subq - call hhvm_ccc void @stack_helper(i64 %c, i64 7, i64 42) - ret void -} - diff --git a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir --- a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir +++ b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir @@ -29,11 +29,11 @@ bb.0.entry: liveins: $rdi - ; CHECK: renamable $r12 = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, renamable $r12(tied-def 0) + ; CHECK: renamable $rbx = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, renamable $rbx(tied-def 0) %1:gr64 = COPY $rdi ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %1:gr64 = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64_rt_allregs, csr_64_hhvm, implicit-def $rsp, implicit-def $ssp + %1:gr64 = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp $rax = COPY %1 RET 0, killed $rax diff --git a/llvm/test/Feature/callingconventions.ll b/llvm/test/Feature/callingconventions.ll --- a/llvm/test/Feature/callingconventions.ll +++ b/llvm/test/Feature/callingconventions.ll @@ -59,11 +59,4 @@ ret void } -declare hhvm_ccc void @hhvm_c_callee() - -define hhvmcc void @hhvm_caller() { - call hhvm_ccc void @hhvm_c_callee() - ret void -} - declare i32 @__gxx_personality_v0(...)