Index: llvm/trunk/include/llvm/IR/CallingConv.h
===================================================================
--- llvm/trunk/include/llvm/IR/CallingConv.h
+++ llvm/trunk/include/llvm/IR/CallingConv.h
@@ -143,11 +143,15 @@
     /// System V ABI, used on most non-Windows systems.
     X86_64_SysV = 78,
 
-    /// \brief The C convention as implemented on Windows/x86-64. This
-    /// convention differs from the more common \c X86_64_SysV convention
-    /// in a number of ways, most notably in that XMM registers used to pass
-    /// arguments are shadowed by GPRs, and vice versa.
-    X86_64_Win64 = 79,
+    /// \brief The C convention as implemented on Windows/x86-64 and
+    /// AArch64. This convention differs from the more common
+    /// \c X86_64_SysV convention in a number of ways, most notably in
+    /// that XMM registers used to pass arguments are shadowed by GPRs,
+    /// and vice versa.
+    /// On AArch64, this is identical to the normal C (AAPCS) calling
+    /// convention for normal functions, but floats are passed in integer
+    /// registers to variadic functions.
+    Win64 = 79,
 
     /// \brief MSVC calling convention that passes vectors and vector aggregates
     /// in SSE registers.
Index: llvm/trunk/lib/AsmParser/LLLexer.cpp
===================================================================
--- llvm/trunk/lib/AsmParser/LLLexer.cpp
+++ llvm/trunk/lib/AsmParser/LLLexer.cpp
@@ -588,7 +588,7 @@
   KEYWORD(spir_func);
   KEYWORD(intel_ocl_bicc);
   KEYWORD(x86_64_sysvcc);
-  KEYWORD(x86_64_win64cc);
+  KEYWORD(win64cc);
   KEYWORD(x86_regcallcc);
   KEYWORD(webkit_jscc);
   KEYWORD(swiftcc);
Index: llvm/trunk/lib/AsmParser/LLParser.cpp
===================================================================
--- llvm/trunk/lib/AsmParser/LLParser.cpp
+++ llvm/trunk/lib/AsmParser/LLParser.cpp
@@ -1670,7 +1670,7 @@
 ///   ::= 'spir_func'
 ///   ::= 'spir_kernel'
 ///   ::= 'x86_64_sysvcc'
-///   ::= 'x86_64_win64cc'
+///   ::= 'win64cc'
 ///   ::= 'webkit_jscc'
 ///   ::= 'anyregcc'
 ///   ::= 'preserve_mostcc'
@@ -1712,7 +1712,7 @@
   case lltok::kw_spir_func:      CC = CallingConv::SPIR_FUNC; break;
   case lltok::kw_intel_ocl_bicc: CC = CallingConv::Intel_OCL_BI; break;
   case lltok::kw_x86_64_sysvcc:  CC = CallingConv::X86_64_SysV; break;
-  case lltok::kw_x86_64_win64cc: CC = CallingConv::X86_64_Win64; break;
+  case lltok::kw_win64cc:        CC = CallingConv::Win64; break;
   case lltok::kw_webkit_jscc:    CC = CallingConv::WebKit_JS; break;
   case lltok::kw_anyregcc:       CC = CallingConv::AnyReg; break;
   case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
Index: llvm/trunk/lib/AsmParser/LLToken.h
===================================================================
--- llvm/trunk/lib/AsmParser/LLToken.h
+++ llvm/trunk/lib/AsmParser/LLToken.h
@@ -141,7 +141,7 @@
   kw_spir_kernel,
   kw_spir_func,
   kw_x86_64_sysvcc,
-  kw_x86_64_win64cc,
+  kw_win64cc,
   kw_webkit_jscc,
   kw_anyregcc,
   kw_swiftcc,
Index: llvm/trunk/lib/IR/AsmWriter.cpp
===================================================================
--- llvm/trunk/lib/IR/AsmWriter.cpp
+++ llvm/trunk/lib/IR/AsmWriter.cpp
@@ -365,7 +365,7 @@
   case CallingConv::PTX_Kernel:    Out << "ptx_kernel"; break;
   case CallingConv::PTX_Device:    Out << "ptx_device"; break;
   case CallingConv::X86_64_SysV:   Out << "x86_64_sysvcc"; break;
-  case CallingConv::X86_64_Win64:  Out << "x86_64_win64cc"; break;
+  case CallingConv::Win64:         Out << "win64cc"; break;
  case CallingConv::SPIR_FUNC:     Out << "spir_func"; break;
  case CallingConv::SPIR_KERNEL:   Out << "spir_kernel"; break;
  case CallingConv::Swift:         Out << "swiftcc"; break;
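
The changes above only touch the textual spelling of the convention: the numeric ID (cc79) is unchanged, and the keyword the parser accepts and the printer emits becomes win64cc. A minimal textual-IR sketch of the new spelling (the function names here are illustrative, not part of the patch):

  ; Declare and call a function using the renamed convention keyword.
  declare win64cc void @callee(i32)

  define void @caller(i32 %x) {
  entry:
    ; The call-site convention must match the callee's declared convention.
    call win64cc void @callee(i32 %x)
    ret void
  }
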
Index: llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -958,7 +958,8 @@
 
   unsigned GPRSaveSize = AFI->getVarArgsGPRSize();
   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
-  if (Subtarget.isTargetWindows())
+  bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+  if (IsWin64)
     Offset -= alignTo(GPRSaveSize, 16);
 
   for (unsigned i = 0; i < Count; ++i) {
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2655,6 +2655,8 @@
     if (!Subtarget->isTargetDarwin())
       return CC_AArch64_AAPCS;
     return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
+  case CallingConv::Win64:
+    return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
   }
 }
 
@@ -2670,6 +2672,7 @@
     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo &MFI = MF.getFrameInfo();
+  bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
 
   // Assign locations to all of the incoming arguments.
   SmallVector<CCValAssign, 16> ArgLocs;
@@ -2826,7 +2829,7 @@
   // varargs
   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
   if (isVarArg) {
-    if (!Subtarget->isTargetDarwin()) {
+    if (!Subtarget->isTargetDarwin() || IsWin64) {
       // The AAPCS variadic function ABI is identical to the non-variadic
       // one. As a result there may be more arguments in registers and we should
       // save them for future reference.
@@ -2873,6 +2876,7 @@
   MachineFrameInfo &MFI = MF.getFrameInfo();
   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
   auto PtrVT = getPointerTy(DAG.getDataLayout());
+  bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
 
   SmallVector<SDValue, 8> MemOps;
 
@@ -2885,7 +2889,7 @@
     unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
     int GPRIdx = 0;
     if (GPRSaveSize != 0) {
-      if (Subtarget->isTargetWindows())
+      if (IsWin64)
         GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
       else
         GPRIdx = MFI.CreateStackObject(GPRSaveSize, 8, false);
@@ -2897,7 +2901,7 @@
       SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
       SDValue Store = DAG.getStore(
           Val.getValue(1), DL, Val, FIN,
-          Subtarget->isTargetWindows()
+          IsWin64
               ? MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                                   GPRIdx,
                                                   (i - FirstVariadicGPR) * 8)
@@ -2910,7 +2914,7 @@
     FuncInfo->setVarArgsGPRIndex(GPRIdx);
     FuncInfo->setVarArgsGPRSize(GPRSaveSize);
 
-  if (Subtarget->hasFPARMv8() && !Subtarget->isTargetWindows()) {
+  if (Subtarget->hasFPARMv8() && !IsWin64) {
     static const MCPhysReg FPRArgRegs[] = {
         AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
         AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
@@ -4588,7 +4592,9 @@
 
 SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
                                             SelectionDAG &DAG) const {
-  if (Subtarget->isTargetWindows())
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
     return LowerWin64_VASTART(Op, DAG);
   else if (Subtarget->isTargetDarwin())
     return LowerDarwin_VASTART(Op, DAG);
Index: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
@@ -306,6 +306,17 @@
   bool enableEarlyIfConversion() const override;
 
   std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const override;
+
+  bool isCallingConvWin64(CallingConv::ID CC) const {
+    switch (CC) {
+    case CallingConv::C:
+      return isTargetWindows();
+    case CallingConv::Win64:
+      return true;
+    default:
+      return false;
+    }
+  }
 };
 
 } // End llvm namespace
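
With isCallingConvWin64 keyed off the function's calling convention rather than the target OS, a variadic win64cc function gets the Win64-style GPR home area and va_start lowering even on a non-Windows AArch64 triple. A minimal sketch of the kind of input this enables, mirroring the new aarch64_win64cc_vararg.ll test added later in this patch (the function name is illustrative):

  target triple = "aarch64-linux-gnu"

  declare void @llvm.va_start(i8*)

  define win64cc i8* @va_pointer(i32 %count, ...) nounwind {
  entry:
    %ap = alloca i8*, align 8
    %ap1 = bitcast i8** %ap to i8*
    call void @llvm.va_start(i8* %ap1)
    %ap2 = load i8*, i8** %ap, align 8
    ret i8* %ap2
  }
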
Index: llvm/trunk/lib/Target/X86/X86CallingConv.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86CallingConv.td
+++ llvm/trunk/lib/Target/X86/X86CallingConv.td
@@ -448,7 +448,7 @@
   CCIfCC<"CallingConv::Swift", CCDelegateTo>,
 
   // Handle explicit CC selection
-  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo>,
+  CCIfCC<"CallingConv::Win64", CCDelegateTo>,
   CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo>,
 
   // Handle Vectorcall CC
@@ -1004,7 +1004,7 @@
   CCIfCC<"CallingConv::HiPE", CCDelegateTo>,
   CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo>,
   CCIfCC<"CallingConv::AnyReg", CCDelegateTo>,
-  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo>,
+  CCIfCC<"CallingConv::Win64", CCDelegateTo>,
   CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo>,
   CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo>,
   CCIfCC<"CallingConv::HHVM", CCDelegateTo>,
Index: llvm/trunk/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp
@@ -1187,7 +1187,7 @@
       CC != CallingConv::X86_StdCall &&
       CC != CallingConv::X86_ThisCall &&
       CC != CallingConv::X86_64_SysV &&
-      CC != CallingConv::X86_64_Win64)
+      CC != CallingConv::Win64)
     return false;
 
   // Don't handle popping bytes if they don't fit the ret's immediate.
@@ -3171,7 +3171,7 @@
   case CallingConv::X86_FastCall:
   case CallingConv::X86_StdCall:
   case CallingConv::X86_ThisCall:
-  case CallingConv::X86_64_Win64:
+  case CallingConv::Win64:
   case CallingConv::X86_64_SysV:
     break;
   }
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -2668,7 +2668,7 @@
   switch (CC) {
   // C calling conventions:
   case CallingConv::C:
-  case CallingConv::X86_64_Win64:
+  case CallingConv::Win64:
   case CallingConv::X86_64_SysV:
   // Callee pop conventions:
   case CallingConv::X86_ThisCall:
Index: llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
@@ -224,7 +224,7 @@
 const TargetRegisterClass *
 X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
   const Function *F = MF.getFunction();
-  if (IsWin64 || (F && F->getCallingConv() == CallingConv::X86_64_Win64))
+  if (IsWin64 || (F && F->getCallingConv() == CallingConv::Win64))
     return &X86::GR64_TCW64RegClass;
   else if (Is64Bit)
     return &X86::GR64_TCRegClass;
@@ -334,7 +334,7 @@
     if (Is64Bit)
       return CSR_64_MostRegs_SaveList;
     break;
-  case CallingConv::X86_64_Win64:
+  case CallingConv::Win64:
     if (!HasSSE)
       return CSR_Win64_NoSSE_SaveList;
     return CSR_Win64_SaveList;
@@ -450,7 +450,7 @@
     if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
     break;
-  case CallingConv::X86_64_Win64:
+  case CallingConv::Win64:
     return CSR_Win64_RegMask;
   case CallingConv::X86_64_SysV:
     return CSR_64_RegMask;
Index: llvm/trunk/lib/Target/X86/X86Subtarget.h
===================================================================
--- llvm/trunk/lib/Target/X86/X86Subtarget.h
+++ llvm/trunk/lib/Target/X86/X86Subtarget.h
@@ -597,7 +597,7 @@
     case CallingConv::Intel_OCL_BI:
       return isTargetWin64();
     // This convention allows using the Win64 convention on other targets.
-    case CallingConv::X86_64_Win64:
+    case CallingConv::Win64:
      return true;
    // This convention allows using the SysV convention on Windows targets.
    case CallingConv::X86_64_SysV:
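
As the comment in X86Subtarget.h above notes, the convention can be requested explicitly on non-Windows x86-64 targets as well; only its IR spelling changes here. A minimal sketch of mixing the default (SysV) convention with an explicit win64cc callee, in the spirit of the sibcall-win64.ll changes later in this patch (names are illustrative):

  target triple = "x86_64-pc-linux"

  declare win64cc void @win64_callee(i32)

  define void @sysv_caller(i32 %p1) {
  entry:
    ; Explicit convention on the call site, matching the callee.
    call win64cc void @win64_callee(i32 %p1)
    ret void
  }
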
Index: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3039,7 +3039,7 @@
   }
 
   void visitVAStartInst(VAStartInst &I) override {
-    if (F.getCallingConv() == CallingConv::X86_64_Win64)
+    if (F.getCallingConv() == CallingConv::Win64)
       return;
     IRBuilder<> IRB(&I);
     VAStartInstrumentationList.push_back(&I);
@@ -3053,7 +3053,7 @@
   }
 
   void visitVACopyInst(VACopyInst &I) override {
-    if (F.getCallingConv() == CallingConv::X86_64_Win64)
+    if (F.getCallingConv() == CallingConv::Win64)
       return;
     IRBuilder<> IRB(&I);
     Value *VAListTag = I.getArgOperand(0);
Index: llvm/trunk/test/Bitcode/compatibility-3.6.ll
===================================================================
--- llvm/trunk/test/Bitcode/compatibility-3.6.ll
+++ llvm/trunk/test/Bitcode/compatibility-3.6.ll
@@ -368,9 +368,9 @@
 declare x86_64_sysvcc void @f.x86_64_sysvcc()
 ; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
 declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
 declare cc80 void @f.cc80()
 ; CHECK: declare x86_vectorcallcc void @f.cc80()
 declare x86_vectorcallcc void @f.x86_vectorcallcc()
Index: llvm/trunk/test/Bitcode/compatibility-3.7.ll
===================================================================
--- llvm/trunk/test/Bitcode/compatibility-3.7.ll
+++ llvm/trunk/test/Bitcode/compatibility-3.7.ll
@@ -368,9 +368,9 @@
 declare x86_64_sysvcc void @f.x86_64_sysvcc()
 ; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
 declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
 declare cc80 void @f.cc80()
 ; CHECK: declare x86_vectorcallcc void @f.cc80()
 declare x86_vectorcallcc void @f.x86_vectorcallcc()
Index: llvm/trunk/test/Bitcode/compatibility-3.8.ll
===================================================================
--- llvm/trunk/test/Bitcode/compatibility-3.8.ll
+++ llvm/trunk/test/Bitcode/compatibility-3.8.ll
@@ -393,9 +393,9 @@
 declare x86_64_sysvcc void @f.x86_64_sysvcc()
 ; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
 declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
 declare cc80 void @f.cc80()
 ; CHECK: declare x86_vectorcallcc void @f.cc80()
 declare x86_vectorcallcc void @f.x86_vectorcallcc()
Index: llvm/trunk/test/Bitcode/compatibility-3.9.ll
===================================================================
--- llvm/trunk/test/Bitcode/compatibility-3.9.ll
+++ llvm/trunk/test/Bitcode/compatibility-3.9.ll
@@ -422,9 +422,9 @@
 declare x86_64_sysvcc void @f.x86_64_sysvcc()
 ; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
 declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
 declare cc80 void @f.cc80()
 ; CHECK: declare x86_vectorcallcc void @f.cc80()
 declare x86_vectorcallcc void @f.x86_vectorcallcc()
Index: llvm/trunk/test/Bitcode/compatibility-4.0.ll
===================================================================
--- llvm/trunk/test/Bitcode/compatibility-4.0.ll
+++ llvm/trunk/test/Bitcode/compatibility-4.0.ll
@@ -422,9 +422,9 @@
 declare x86_64_sysvcc void @f.x86_64_sysvcc()
 ; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
 declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
 declare cc80 void @f.cc80()
 ; CHECK: declare x86_vectorcallcc void @f.cc80()
 declare x86_vectorcallcc void @f.x86_vectorcallcc()
Index: llvm/trunk/test/Bitcode/compatibility.ll
===================================================================
--- llvm/trunk/test/Bitcode/compatibility.ll
+++ llvm/trunk/test/Bitcode/compatibility.ll
@@ -425,9 +425,9 @@
 declare x86_64_sysvcc void @f.x86_64_sysvcc()
 ; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
 declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.win64cc()
+; CHECK: declare win64cc void @f.win64cc()
 declare cc80 void @f.cc80()
 ; CHECK: declare x86_vectorcallcc void @f.cc80()
 declare x86_vectorcallcc void @f.x86_vectorcallcc()
Index: llvm/trunk/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
+++ llvm/trunk/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+
+define win64cc void @pass_va(i32 %count, ...) nounwind {
+entry:
+; CHECK: sub sp, sp, #80
+; CHECK: add x8, sp, #24
+; CHECK: add x0, sp, #24
+; CHECK: stp x6, x7, [sp, #64]
+; CHECK: stp x4, x5, [sp, #48]
+; CHECK: stp x2, x3, [sp, #32]
+; CHECK: str x1, [sp, #24]
+; CHECK: stp x30, x8, [sp]
+; CHECK: bl other_func
+; CHECK: ldr x30, [sp], #80
+; CHECK: ret
+  %ap = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap1)
+  %ap2 = load i8*, i8** %ap, align 8
+  call void @other_func(i8* %ap2)
+  ret void
+}
+
+declare void @other_func(i8*) local_unnamed_addr
+
+declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_copy(i8*, i8*) nounwind
+
+; CHECK-LABEL: f9:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #24
+; CHECK: add x0, sp, #24
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #16
+; CHECK: ret
+define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap1)
+  %ap2 = load i8*, i8** %ap, align 8
+  ret i8* %ap2
+}
+
+; CHECK-LABEL: f8:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #16
+; CHECK: add x0, sp, #16
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #16
+; CHECK: ret
+define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap1)
+  %ap2 = load i8*, i8** %ap, align 8
+  ret i8* %ap2
+}
+
+; CHECK-LABEL: f7:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #8
+; CHECK: add x0, sp, #8
+; CHECK: stp x8, x7, [sp], #16
+; CHECK: ret
+define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap1)
+  %ap2 = load i8*, i8** %ap, align 8
+  ret i8* %ap2
+}
Index: llvm/trunk/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
+++ llvm/trunk/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
 ; CHECK-NOT: -{{[1-9][0-9]*}}(%rsp)
 
-define x86_64_win64cc x86_fp80 @a(i64 %x) nounwind readnone {
+define win64cc x86_fp80 @a(i64 %x) nounwind readnone {
 entry:
   %conv = sitofp i64 %x to x86_fp80 ; [#uses=1]
   ret x86_fp80 %conv
Index: llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll
+++ llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll
@@ -316,7 +316,7 @@
 ; STDERR-NOT: FastISel missed terminator: ret void
 
 ; CHECK-LABEL: win64ccfun
-define x86_64_win64cc void @win64ccfun(i32 %i) {
+define win64cc void @win64ccfun(i32 %i) {
 ; CHECK: ret
   ret void
 }
Index: llvm/trunk/test/CodeGen/X86/sibcall-win64.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/sibcall-win64.ll
+++ llvm/trunk/test/CodeGen/X86/sibcall-win64.ll
@@ -1,15 +1,15 @@
 ; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
 
-declare x86_64_win64cc void @win64_callee(i32)
-declare x86_64_win64cc void (i32)* @win64_indirect()
-declare x86_64_win64cc void @win64_other(i32)
+declare win64cc void @win64_callee(i32)
+declare win64cc void (i32)* @win64_indirect()
+declare win64cc void @win64_other(i32)
 declare void @sysv_callee(i32)
 declare void (i32)* @sysv_indirect()
 declare void @sysv_other(i32)
 
 define void @sysv_caller(i32 %p1) {
 entry:
-  tail call x86_64_win64cc void @win64_callee(i32 %p1)
+  tail call win64cc void @win64_callee(i32 %p1)
   ret void
 }
 
@@ -19,7 +19,7 @@
 ; CHECK: addq $40, %rsp
 ; CHECK: retq
 
-define x86_64_win64cc void @win64_caller(i32 %p1) {
+define win64cc void @win64_caller(i32 %p1) {
 entry:
   tail call void @sysv_callee(i32 %p1)
   ret void
@@ -37,18 +37,18 @@
 ; CHECK-LABEL: sysv_matched:
 ; CHECK: jmp sysv_callee # TAILCALL
 
-define x86_64_win64cc void @win64_matched(i32 %p1) {
-  tail call x86_64_win64cc void @win64_callee(i32 %p1)
+define win64cc void @win64_matched(i32 %p1) {
+  tail call win64cc void @win64_callee(i32 %p1)
   ret void
 }
 
 ; CHECK-LABEL: win64_matched:
 ; CHECK: jmp win64_callee # TAILCALL
 
-define x86_64_win64cc void @win64_indirect_caller(i32 %p1) {
-  %1 = call x86_64_win64cc void (i32)* @win64_indirect()
-  call x86_64_win64cc void @win64_other(i32 0)
-  tail call x86_64_win64cc void %1(i32 %p1)
+define win64cc void @win64_indirect_caller(i32 %p1) {
+  %1 = call win64cc void (i32)* @win64_indirect()
+  call win64cc void @win64_other(i32 0)
+  tail call win64cc void %1(i32 %p1)
   ret void
 }
 
Index: llvm/trunk/test/CodeGen/X86/win64-nosse-csrs.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/win64-nosse-csrs.ll
+++ llvm/trunk/test/CodeGen/X86/win64-nosse-csrs.ll
@@ -20,7 +20,7 @@
 }
 
 ; Function Attrs: nounwind uwtable
-define x86_64_win64cc i64 @peach() unnamed_addr #1 {
+define win64cc i64 @peach() unnamed_addr #1 {
 entry-block:
   %0 = call i64 @banana()
   ret i64 %0
Index: llvm/trunk/test/CodeGen/X86/win64_nonvol.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/win64_nonvol.ll
+++ llvm/trunk/test/CodeGen/X86/win64_nonvol.ll
@@ -5,7 +5,7 @@
 ; Win64 nonvolatile registers get saved.
 
 ; CHECK-LABEL: bar:
-define x86_64_win64cc void @bar(i32 %a, i32 %b) {
+define win64cc void @bar(i32 %a, i32 %b) {
 ; CHECK-DAG: pushq %rdi
 ; CHECK-DAG: pushq %rsi
 ; CHECK-DAG: movaps %xmm6,
Index: llvm/trunk/test/CodeGen/X86/win64_params.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/win64_params.ll
+++ llvm/trunk/test/CodeGen/X86/win64_params.ll
@@ -12,7 +12,7 @@
   ret i32 %add
 }
 
-define x86_64_win64cc i32 @f7(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6) nounwind readnone optsize {
+define win64cc i32 @f7(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6) nounwind readnone optsize {
 entry:
 ; CHECK: movl 48(%rsp), %eax
 ; CHECK: addl 40(%rsp), %eax
Index: llvm/trunk/test/CodeGen/X86/win_chkstk.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/win_chkstk.ll
+++ llvm/trunk/test/CodeGen/X86/win_chkstk.ll
@@ -51,7 +51,7 @@
 
 ; Make sure we don't call __chkstk or __alloca on non-Windows even if the
 ; caller has the Win64 calling convention.
-define x86_64_win64cc i32 @main4k_win64() nounwind {
+define win64cc i32 @main4k_win64() nounwind {
 entry:
 ; WIN_X32: calll __chkstk
 ; WIN_X64: callq __chkstk
Index: llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll
+++ llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll
@@ -103,7 +103,7 @@
 
 ; Make sure we don't emit the probe sequence if not on windows even if the
 ; caller has the Win64 calling convention.
-define x86_64_win64cc i32 @main4k_win64() nounwind {
+define win64cc i32 @main4k_win64() nounwind {
 entry:
 ; WIN_X64: movq %gs:16, %rcx
 ; LINUX-NOT: movq %gs:16, %rcx
@@ -115,7 +115,7 @@
 declare i32 @bar(i8*) nounwind
 
 ; Within-body inline probe expansion
-define x86_64_win64cc i32 @main4k_alloca(i64 %n) nounwind {
+define win64cc i32 @main4k_alloca(i64 %n) nounwind {
 entry:
 ; WIN_X64: callq bar
 ; WIN_X64: movq %gs:16, [[R:%r.*]]
Index: llvm/trunk/test/CodeGen/X86/x86-64-ms_abi-vararg.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/x86-64-ms_abi-vararg.ll
+++ llvm/trunk/test/CodeGen/X86/x86-64-ms_abi-vararg.ll
@@ -3,7 +3,7 @@
 ; Verify that the var arg parameters which are passed in registers are stored
 ; in home stack slots allocated by the caller and that AP is correctly
 ; calculated.
-define x86_64_win64cc void @average_va(i32 %count, ...) nounwind {
+define win64cc void @average_va(i32 %count, ...) nounwind {
 entry:
 ; CHECK: pushq
 ; CHECK: movq %r9, 40(%rsp)
@@ -24,7 +24,7 @@
 ; CHECK-LABEL: f5:
 ; CHECK: pushq
 ; CHECK: leaq 56(%rsp),
-define x86_64_win64cc i8** @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
+define win64cc i8** @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
 entry:
   %ap = alloca i8*, align 8
   %ap.0 = bitcast i8** %ap to i8*
@@ -35,7 +35,7 @@
 ; CHECK-LABEL: f4:
 ; CHECK: pushq
 ; CHECK: leaq 48(%rsp),
-define x86_64_win64cc i8** @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+define win64cc i8** @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
 entry:
   %ap = alloca i8*, align 8
   %ap.0 = bitcast i8** %ap to i8*
@@ -46,7 +46,7 @@
 ; CHECK-LABEL: f3:
 ; CHECK: pushq
 ; CHECK: leaq 40(%rsp),
-define x86_64_win64cc i8** @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
+define win64cc i8** @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
 entry:
   %ap = alloca i8*, align 8
   %ap.0 = bitcast i8** %ap to i8*
@@ -62,7 +62,7 @@
 ; CHECK: movq [[REG_copy1]], 8(%rsp)
 ; CHECK: movq [[REG_copy1]], (%rsp)
 ; CHECK: ret
-define x86_64_win64cc void @copy1(i64 %a0, ...) nounwind {
+define win64cc void @copy1(i64 %a0, ...) nounwind {
 entry:
   %ap = alloca i8*, align 8
   %cp = alloca i8*, align 8
@@ -78,7 +78,7 @@
 ; CHECK: movq [[REG_copy4]], 8(%rsp)
 ; CHECK: movq [[REG_copy4]], (%rsp)
 ; CHECK: ret
-define x86_64_win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+define win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
 entry:
   %ap = alloca i8*, align 8
   %cp = alloca i8*, align 8
@@ -96,7 +96,7 @@
 ; CHECK: movq [[REG_arg4_2]], (%rsp)
 ; CHECK: movl 48(%rsp), %eax
 ; CHECK: ret
-define x86_64_win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+define win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
 entry:
   %ap = alloca i8*, align 8
   %ap.0 = bitcast i8** %ap to i8*
Index: llvm/trunk/utils/vim/syntax/llvm.vim
===================================================================
--- llvm/trunk/utils/vim/syntax/llvm.vim
+++ llvm/trunk/utils/vim/syntax/llvm.vim
@@ -161,7 +161,7 @@
       \ within
       \ writeonly
       \ x86_64_sysvcc
-      \ x86_64_win64cc
+      \ win64cc
      \ x86_fastcallcc
      \ x86_stdcallcc
      \ x86_thiscallcc