diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -273,7 +273,14 @@
   //   void *__overflow_arg_area;
   //   void *__reg_save_area;
   // } va_list[1];
-  SystemZBuiltinVaList
+  SystemZBuiltinVaList,
+
+  // typedef struct __va_list_tag {
+  //   void *__current_saved_reg_area_pointer;
+  //   void *__saved_reg_area_end_pointer;
+  //   void *__overflow_area_pointer;
+  // } va_list;
+  HexagonBuiltinVaList
 };
 
 protected:
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -7795,6 +7795,57 @@
   return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
 }
 
+static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
+  // typedef struct __va_list_tag {
+  RecordDecl *VaListTagDecl;
+  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+  VaListTagDecl->startDefinition();
+
+  const size_t NumFields = 3;
+  QualType FieldTypes[NumFields];
+  const char *FieldNames[NumFields];
+
+  //   void *CurrentSavedRegisterArea;
+  FieldTypes[0] = Context->getPointerType(Context->VoidTy);
+  FieldNames[0] = "__current_saved_reg_area_pointer";
+
+  //   void *SavedRegAreaEnd;
+  FieldTypes[1] = Context->getPointerType(Context->VoidTy);
+  FieldNames[1] = "__saved_reg_area_end_pointer";
+
+  //   void *OverflowArea;
+  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+  FieldNames[2] = "__overflow_area_pointer";
+
+  // Create fields
+  for (unsigned i = 0; i < NumFields; ++i) {
+    FieldDecl *Field = FieldDecl::Create(
+        const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
+        SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
+        /*TInfo=*/0,
+        /*BitWidth=*/0,
+        /*Mutable=*/false, ICIS_NoInit);
+    Field->setAccess(AS_public);
+    VaListTagDecl->addDecl(Field);
+  }
+  VaListTagDecl->completeDefinition();
+  Context->VaListTagDecl = VaListTagDecl;
+  QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+  // } __va_list_tag;
+  TypedefDecl *VaListTagTypedefDecl =
+      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
+  QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
+
+  // typedef __va_list_tag __builtin_va_list[1];
+  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+  QualType VaListTagArrayType = Context->getConstantArrayType(
+      VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);
+
+  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
 static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                      TargetInfo::BuiltinVaListKind Kind) {
   switch (Kind) {
@@ -7814,6 +7865,8 @@
     return CreateAAPCSABIBuiltinVaListDecl(Context);
   case TargetInfo::SystemZBuiltinVaList:
     return CreateSystemZBuiltinVaListDecl(Context);
+  case TargetInfo::HexagonBuiltinVaList:
+    return CreateHexagonBuiltinVaListDecl(Context);
   }
 
   llvm_unreachable("Unhandled __builtin_va_list type kind");
diff --git a/clang/lib/Basic/Targets/Hexagon.h b/clang/lib/Basic/Targets/Hexagon.h
--- a/clang/lib/Basic/Targets/Hexagon.h
+++ b/clang/lib/Basic/Targets/Hexagon.h
@@ -103,6 +103,8 @@
                             DiagnosticsEngine &Diags) override;
 
   BuiltinVaListKind getBuiltinVaListKind() const override {
+    if (getTriple().isMusl())
+      return TargetInfo::HexagonBuiltinVaList;
     return TargetInfo::CharPtrBuiltinVaList;
   }
 
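[Note, not part of the patch] The three hunks above add the AST-level view of the new musl va_list. As a rough C sketch of the layout being introduced (field names come from the typedef comment; what va_start places in each field is an assumption about the Hexagon linux-musl ABI, not something this change defines):

/* Hypothetical C spelling of the __builtin_va_list created by
 * CreateHexagonBuiltinVaListDecl above; the real type is the implicit
 * __va_list_tag record, this is only an illustration. */
typedef struct __hexagon_musl_va_list_tag {
  void *__current_saved_reg_area_pointer; /* presumably: next unread slot in the spilled r0-r5 area */
  void *__saved_reg_area_end_pointer;     /* presumably: one past the end of that save area */
  void *__overflow_area_pointer;          /* presumably: next argument passed on the stack */
} __hexagon_musl_va_list[1];              /* array-of-one, matching the ConstantArrayType above */

Only the va_arg lowering added below consumes these three fields; va_start is set up elsewhere.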
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -7584,18 +7584,25 @@
 
 namespace {
 
-class HexagonABIInfo : public ABIInfo {
+class HexagonABIInfo : public DefaultABIInfo {
 public:
-  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+  HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
 
 private:
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
 
   void computeInfo(CGFunctionInfo &FI) const override;
 
   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                     QualType Ty) const override;
+  Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
+                              QualType Ty) const;
+  Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
+                              QualType Ty) const;
+  Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
+                                   QualType Ty) const;
 };
 
 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -7606,23 +7613,63 @@
   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     return 29;
   }
+
+  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &GCM) const override {
+    if (GV->isDeclaration())
+      return;
+    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+    if (!FD)
+      return;
+  }
 };
 
 } // namespace
 
 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  unsigned RegsLeft = 6;
   if (!getCXXABI().classifyReturnType(FI))
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
   for (auto &I : FI.arguments())
-    I.info = classifyArgumentType(I.type);
+    I.info = classifyArgumentType(I.type, &RegsLeft);
 }
 
-ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
+  assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
+                       " through registers");
+
+  if (*RegsLeft == 0)
+    return false;
+
+  if (Size <= 32) {
+    (*RegsLeft)--;
+    return true;
+  }
+
+  if (2 <= (*RegsLeft & (~1U))) {
+    *RegsLeft = (*RegsLeft & (~1U)) - 2;
+    return true;
+  }
+
+  // The next available register was r5 but the candidate was greater than
+  // 32 bits, so it has to go on the stack. However, we still consume r5.
+  if (*RegsLeft == 1)
+    *RegsLeft = 0;
+
+  return false;
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
+                                                unsigned *RegsLeft) const {
   if (!isAggregateTypeForABI(Ty)) {
     // Treat an enum type as its underlying type.
     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
       Ty = EnumTy->getDecl()->getIntegerType();
 
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (Size <= 64)
+      HexagonAdjustRegsLeft(Size, RegsLeft);
+
     return Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect();
   }
@@ -7635,13 +7682,20 @@
     return ABIArgInfo::getIgnore();
 
   uint64_t Size = getContext().getTypeSize(Ty);
-  if (Size <= 64) {
+  unsigned Align = getContext().getTypeAlign(Ty);
+
+  if (Size > 64)
+    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+  if (HexagonAdjustRegsLeft(Size, RegsLeft))
+    Align = Size <= 32 ? 32 : 64;
+  if (Size <= Align) {
     // Pass in the smallest viable integer type.
     if (!llvm::isPowerOf2_64(Size))
       Size = llvm::NextPowerOf2(Size);
     return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
   }
-  return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+  return DefaultABIInfo::classifyArgumentType(Ty);
 }
 
 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
@@ -7659,7 +7713,6 @@
     if (Size == VecSize || Size == 2*VecSize)
       return ABIArgInfo::getDirectInReg();
   }
-  // Large vector types should be returned via memory.
   if (Size > 64)
     return getNaturalAlignIndirect(RetTy);
 
@@ -7688,13 +7741,242 @@
     return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
 }
 
+Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
+                                            Address VAListAddr,
+                                            QualType Ty) const {
+  // Load the overflow area pointer.
+  Address __overflow_area_pointer_p =
+      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+      __overflow_area_pointer_p, "__overflow_area_pointer");
+
+  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+  if (Align > 4) {
+    // Alignment should be a power of 2.
+    assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
+
+    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+
+    // Add offset to the current pointer to access the argument.
+    __overflow_area_pointer =
+        CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
+    llvm::Value *AsInt =
+        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+    // Create a mask which should be "AND"ed
+    // with (overflow_arg_area + align - 1)
+    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
+    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+        CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
+        "__overflow_area_pointer.align");
+  }
+
+  // Get the type of the argument from memory and bitcast
+  // overflow area pointer to the argument type.
+  llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
+  Address AddrTyped = CGF.Builder.CreateBitCast(
+      Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
+      llvm::PointerType::getUnqual(PTy));
+
+  // Round up to the minimum stack alignment for varargs which is 4 bytes.
+  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+
+  __overflow_area_pointer = CGF.Builder.CreateGEP(
+      __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+      "__overflow_area_pointer.next");
+  CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
+
+  return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
+                                            Address VAListAddr,
+                                            QualType Ty) const {
+  // FIXME: Need to handle alignment
+  llvm::Type *BP = CGF.Int8PtrTy;
+  llvm::Type *BPP = CGF.Int8PtrPtrTy;
+  CGBuilderTy &Builder = CGF.Builder;
+  Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+  // Handle address alignment for type alignment > 32 bits
+  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+  if (TyAlign > 4) {
+    assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
+    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+  }
+  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  Address AddrTyped = Builder.CreateBitCast(
+      Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
+
+  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+  llvm::Value *NextAddr = Builder.CreateGEP(
+      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+  return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
+                                                 Address VAListAddr,
+                                                 QualType Ty) const {
+  int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
+
+  if (ArgSize > 8)
+    return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
+
+  // Here we need to check whether the argument is in the register area or
+  // in the overflow area.
+  // If the saved register area pointer + argsize rounded up to alignment >
+  // saved register area end pointer, the argument is in the overflow area.
+  unsigned RegsLeft = 6;
+  Ty = CGF.getContext().getCanonicalType(Ty);
+  (void)classifyArgumentType(Ty, &RegsLeft);
+
+  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+  // Get the rounded size of the argument. GCC does not allow a vararg of
+  // size < 4 bytes. We follow the same logic here.
+  ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+  int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+
+  // Argument may be in saved register area
+  CGF.EmitBlock(MaybeRegBlock);
+
+  // Load the current saved register area pointer.
+  Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
+      VAListAddr, 0, "__current_saved_reg_area_pointer_p");
+  llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
+      __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
+
+  // Load the saved register area end pointer.
+  Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
+      VAListAddr, 1, "__saved_reg_area_end_pointer_p");
+  llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
+      __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
+
+  // If the size of argument is > 4 bytes, check if the stack
+  // location is aligned to 8 bytes
+  if (ArgAlign > 4) {
+
+    llvm::Value *__current_saved_reg_area_pointer_int =
+        CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
+                                   CGF.Int32Ty);
+
+    __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
+        __current_saved_reg_area_pointer_int,
+        llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
+        "align_current_saved_reg_area_pointer");
+
+    __current_saved_reg_area_pointer_int =
+        CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
+                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+                              "align_current_saved_reg_area_pointer");
+
+    __current_saved_reg_area_pointer =
+        CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
+                                   __current_saved_reg_area_pointer->getType(),
+                                   "align_current_saved_reg_area_pointer");
+  }
+
+  llvm::Value *__new_saved_reg_area_pointer =
+      CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
+                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+                            "__new_saved_reg_area_pointer");
+
+  llvm::Value *UsingStack = 0;
+  UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
+                                         __saved_reg_area_end_pointer);
+
+  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
+
+  // Argument in saved register area
+  // Implement the block where argument is in register saved area
+  CGF.EmitBlock(InRegBlock);
+
+  llvm::Type *PTy = CGF.ConvertType(Ty);
+  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
+      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
+
+  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
+                          __current_saved_reg_area_pointer_p);
+
+  CGF.EmitBranch(ContBlock);
+
+  // Argument in overflow area
+  // Implement the block where the argument is in overflow area.
+  CGF.EmitBlock(OnStackBlock);
+
+  // Load the overflow area pointer
+  Address __overflow_area_pointer_p =
+      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+      __overflow_area_pointer_p, "__overflow_area_pointer");
+
+  // Align the overflow area pointer according to the alignment of the argument
+  if (ArgAlign > 4) {
+    llvm::Value *__overflow_area_pointer_int =
+        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+    __overflow_area_pointer_int =
+        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
+                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
+                              "align_overflow_area_pointer");
+
+    __overflow_area_pointer_int =
+        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
+                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+                              "align_overflow_area_pointer");
+
+    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
+        "align_overflow_area_pointer");
+  }
+
+  // Get the pointer for next argument in overflow area and store it
+  // to overflow area pointer.
+  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
+      __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+      "__overflow_area_pointer.next");
+
+  CGF.Builder.CreateStore(__new_overflow_area_pointer,
+                          __overflow_area_pointer_p);
+
+  CGF.Builder.CreateStore(__new_overflow_area_pointer,
+                          __current_saved_reg_area_pointer_p);
+
+  // Bitcast the overflow area pointer to the type of argument.
+  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
+  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
+      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
+
+  CGF.EmitBranch(ContBlock);
+
+  // Get the correct pointer to load the variable argument
+  // Implement the ContBlock
+  CGF.EmitBlock(ContBlock);
+
+  llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
+  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
+  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
+
+  return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
+}
+
 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const {
-  // FIXME: Someone needs to audit that this handle alignment correctly.
-  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
-                          getContext().getTypeInfoInChars(Ty),
-                          CharUnits::fromQuantity(4),
-                          /*AllowHigherAlign*/ true);
+
+  if (getTarget().getTriple().isMusl())
+    return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
+
+  return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/clang/test/CodeGen/hexagon-linux-vararg.c b/clang/test/CodeGen/hexagon-linux-vararg.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/hexagon-linux-vararg.c
@@ -0,0 +1,81 @@
+// REQUIRES: hexagon-registered-target
+// RUN: %clang_cc1 -emit-llvm -triple hexagon-unknown-linux-musl %s -o - | FileCheck %s
+#include <stdarg.h>
+
+struct AAA {
+  int x;
+  int y;
+  int z;
+  int d;
+};
+
+// CHECK: call void @llvm.va_start(i8* %arraydecay1)
+// CHECK: %arraydecay2 = getelementptr inbounds [1 x %struct.__va_list_tag],
+// [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+// CHECK: br label %vaarg.maybe_reg
+
+// CHECK: vaarg.maybe_reg:                                  ; preds = %entry
+// CHECK: %__current_saved_reg_area_pointer_p = getelementptr inbounds
+// %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 0
+// CHECK: %__current_saved_reg_area_pointer = load i8*, i8**
+// %__current_saved_reg_area_pointer_p
+// CHECK: %__saved_reg_area_end_pointer_p = getelementptr inbounds
+// %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 1
+// CHECK: %__saved_reg_area_end_pointer = load i8*, i8**
+// %__saved_reg_area_end_pointer_p
+// CHECK: %__new_saved_reg_area_pointer = getelementptr i8, i8*
+// %__current_saved_reg_area_pointer, i32 4
+// CHECK: %0 = icmp sgt i8* %__new_saved_reg_area_pointer,
+// %__saved_reg_area_end_pointer
+// CHECK: br i1 %0, label %vaarg.on_stack, label %vaarg.in_reg
+
+// CHECK: vaarg.in_reg:                                     ; preds =
+// %vaarg.maybe_reg
+// CHECK: %1 = bitcast i8* %__current_saved_reg_area_pointer to i32*
+// CHECK: store i8* %__new_saved_reg_area_pointer, i8**
+// %__current_saved_reg_area_pointer_p
+// CHECK: br label %vaarg.end
+
+// CHECK: vaarg.on_stack:                                   ; preds =
+// %vaarg.maybe_reg
+// CHECK: %__overflow_area_pointer_p = getelementptr inbounds
+// %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 2
+// CHECK: %__overflow_area_pointer = load i8*, i8** %__overflow_area_pointer_p
+// CHECK: %__overflow_area_pointer.next = getelementptr i8, i8*
+// %__overflow_area_pointer, i32 4
+// CHECK: store i8* %__overflow_area_pointer.next, i8**
+// %__overflow_area_pointer_p
+// CHECK: store i8* %__overflow_area_pointer.next, i8**
+// %__current_saved_reg_area_pointer_p
+// CHECK: %2 = bitcast i8* %__overflow_area_pointer to i32*
+// CHECK: br label %vaarg.end
+
+// CHECK: vaarg.end:                                        ; preds =
+// %vaarg.on_stack, %vaarg.in_reg
+// CHECK: %vaarg.addr = phi i32* [ %1, %vaarg.in_reg ], [ %2, %vaarg.on_stack
+// ]
+// CHECK: %3 = load i32, i32* %vaarg.addr
+
+struct AAA aaa = {100, 200, 300, 400};
+
+int foo(int xx, ...) {
+  va_list ap;
+  int d;
+  int ret = 0;
+  struct AAA bbb;
+  va_start(ap, xx);
+  d = va_arg(ap, int);
+  ret += d;
+  bbb = va_arg(ap, struct AAA);
+  ret += bbb.d;
+  d = va_arg(ap, int);
+  ret += d;
+  va_end(ap);
+  return ret;
+}
+
+int main(void) {
+  int x;
+  x = foo(1, 2, aaa, 4);
+  return x;
+}
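[Note, not part of the patch] For readers of the new EmitVAArgForHexagonLinux path, the vaarg.maybe_reg / vaarg.in_reg / vaarg.on_stack blocks exercised by the test above amount to the following C sketch for a 32-bit argument (illustrative names only; the 8-byte alignment fixups and the indirect path taken for arguments larger than 8 bytes are omitted):

/* Illustration of the control flow emitted by EmitVAArgForHexagonLinux for a
 * 4-byte argument. The struct mirrors the three-pointer va_list; none of
 * these identifiers exist in the patch. */
struct hexagon_musl_va_list_tag {
  void *current_saved_reg_area_pointer;
  void *saved_reg_area_end_pointer;
  void *overflow_area_pointer;
};

static void *next_va_arg_slot(struct hexagon_musl_va_list_tag *ap) {
  int arg_size = 4; /* values of 32 bits or less occupy a 4-byte slot (GCC-compatible) */
  char *next = (char *)ap->current_saved_reg_area_pointer + arg_size;
  if (next > (char *)ap->saved_reg_area_end_pointer) {
    /* vaarg.on_stack: register save area exhausted; read from the overflow
     * area and advance both pointers, as the emitted IR does. */
    void *slot = ap->overflow_area_pointer;
    ap->overflow_area_pointer = (char *)slot + arg_size;
    ap->current_saved_reg_area_pointer = ap->overflow_area_pointer;
    return slot;
  }
  /* vaarg.in_reg: the value still lives in the spilled r0-r5 save area. */
  void *slot = ap->current_saved_reg_area_pointer;
  ap->current_saved_reg_area_pointer = next;
  return slot;
}

va_arg(ap, int) then loads the value through the returned slot pointer, which is what the final phi in vaarg.end selects.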