Index: cfe/trunk/lib/CodeGen/CGExprAgg.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGExprAgg.cpp
+++ cfe/trunk/lib/CodeGen/CGExprAgg.cpp
@@ -967,12 +967,9 @@
   Address ArgValue = Address::invalid();
   Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
 
+  // If EmitVAArg fails, emit an error.
   if (!ArgPtr.isValid()) {
-    // If EmitVAArg fails, we fall back to the LLVM instruction.
-    llvm::Value *Val = Builder.CreateVAArg(ArgValue.getPointer(),
-                                           CGF.ConvertType(VE->getType()));
-    if (!Dest.isIgnored())
-      Builder.CreateStore(Val, Dest.getAddress());
+    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
     return;
   }
Index: cfe/trunk/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGExprScalar.cpp
+++ cfe/trunk/lib/CodeGen/CGExprScalar.cpp
@@ -3366,9 +3366,11 @@
 
   llvm::Type *ArgTy = ConvertType(VE->getType());
 
-  // If EmitVAArg fails, we fall back to the LLVM instruction.
-  if (!ArgPtr.isValid())
-    return Builder.CreateVAArg(ArgValue.getPointer(), ArgTy);
+  // If EmitVAArg fails, emit an error.
+  if (!ArgPtr.isValid()) {
+    CGF.ErrorUnsupported(VE, "va_arg expression");
+    return llvm::UndefValue::get(ArgTy);
+  }
 
   // FIXME Volatility.
   llvm::Value *Val = Builder.CreateLoad(ArgPtr);
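[Illustrative note, not part of the patch] With the two hunks above, a target
whose EmitVAArg returns Address::invalid() now gets an ErrorUnsupported
diagnostic rather than a silently emitted raw va_arg instruction that its
backend may not implement. As a rough sketch of what reaches these two
emitters (the names struct S, scalar_case and aggregate_case are made up for
illustration): the int case goes through the scalar path in CGExprScalar.cpp,
the struct case through the aggregate path in CGExprAgg.cpp.

/* Illustrative only -- not part of the patch. */
#include <stdarg.h>

struct S { int x, y; };            /* hypothetical aggregate type */

int scalar_case(va_list *ap) {
  return va_arg(*ap, int);         /* scalar va_arg expression */
}

struct S aggregate_case(va_list *ap) {
  return va_arg(*ap, struct S);    /* aggregate va_arg expression */
}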
Index: cfe/trunk/lib/CodeGen/TargetInfo.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/TargetInfo.cpp
+++ cfe/trunk/lib/CodeGen/TargetInfo.cpp
@@ -525,6 +525,54 @@
 }
 
 namespace {
+Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+                       const ABIArgInfo &AI) {
+  // This default implementation defers to the llvm backend's va_arg
+  // instruction. It can handle only passing arguments directly
+  // (typically only handled in the backend for primitive types), or
+  // aggregates passed indirectly by pointer (NOTE: if the "byval"
+  // flag has ABI impact in the callee, this implementation cannot
+  // work.)
+
+  // Only a few cases are covered here at the moment -- those needed
+  // by the default abi.
+  llvm::Value *Val;
+
+  if (AI.isIndirect()) {
+    assert(!AI.getPaddingType() &&
+           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
+    assert(
+        !AI.getIndirectRealign() &&
+        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
+
+    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
+    CharUnits TyAlignForABI = TyInfo.second;
+
+    llvm::Type *BaseTy =
+        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+    llvm::Value *Addr = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
+    return Address(Addr, TyAlignForABI);
+  } else {
+    assert((AI.isDirect() || AI.isExtend()) &&
+           "Unexpected ArgInfo Kind in generic VAArg emitter!");
+
+    assert(!AI.getInReg() &&
+           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
+    assert(!AI.getPaddingType() &&
+           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
+    assert(!AI.getDirectOffset() &&
+           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
+    assert(!AI.getCoerceToType() &&
+           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
+
+    Address Temp = CGF.CreateMemTemp(Ty, "varet");
+    Val =
+        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
+    CGF.Builder.CreateStore(Val, Temp);
+    return Temp;
+  }
+}
+
 /// DefaultABIInfo - The default implementation for ABI specific
 /// details. This implementation provides information which results in
 /// self-consistent and sensible LLVM IR generation, but does not
@@ -544,7 +592,9 @@
   }
 
   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
-                    QualType Ty) const override;
+                    QualType Ty) const override {
+    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+  }
 };
 
 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -553,11 +603,6 @@
     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
 };
 
-Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
-                                  QualType Ty) const {
-  return Address::invalid();
-}
-
 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
   Ty = useFirstFieldIfTransparentUnion(Ty);
 
@@ -609,7 +654,8 @@
   ABIArgInfo classifyArgumentType(QualType Ty) const;
 
   // DefaultABIInfo's classifyReturnType and classifyArgumentType are
-  // non-virtual, but computeInfo is virtual, so we overload that.
+  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
+  // overload them.
   void computeInfo(CGFunctionInfo &FI) const override {
     if (!getCXXABI().classifyReturnType(FI))
       FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
@@ -713,7 +759,13 @@
 
 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
-  return Address::invalid();
+  // The PNaCl ABI is a bit odd, in that varargs don't use normal
+  // function classification. Structs get passed directly for varargs
+  // functions, through a rewriting transform in
+  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
+  // this target to actually support a va_arg instruction with an
+  // aggregate type, unlike other targets.
+  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
 }
 
 /// \brief Classify argument of given type \p Ty.
@@ -3516,13 +3568,15 @@
 }
 
+// TODO: this implementation is now likely redundant with
+// DefaultABIInfo::EmitVAArg.
 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                       QualType Ty) const {
   const unsigned OverflowLimit = 8;
   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
     // TODO: Implement this. For now ignore.
     (void)CTy;
-    return Address::invalid();
+    return Address::invalid(); // FIXME?
   }
 
   // struct __va_list_tag {
@@ -3706,7 +3760,7 @@
 
 namespace {
 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
-class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
+class PPC64_SVR4_ABIInfo : public ABIInfo {
 public:
   enum ABIKind {
     ELFv1 = 0,
@@ -3748,7 +3802,7 @@
 
 public:
   PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
-      : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
+      : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
 
   bool isPromotableTypeForABI(QualType Ty) const;
   CharUnits getParamTypeAlignment(QualType Ty) const;
@@ -4747,7 +4801,7 @@
   // illegal vector types. Lower VAArg here for these cases and use
   // the LLVM va_arg instruction for everything else.
   if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
-    return Address::invalid();
+    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
 
   CharUnits SlotSize = CharUnits::fromQuantity(8);
 
@@ -6967,6 +7021,8 @@
 }
 // End anonymous namespace.
 
+// TODO: this implementation is likely now redundant with the default
+// EmitVAArg.
 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
   CGBuilderTy &Builder = CGF.Builder;
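[Illustrative note, not part of the patch] A sketch of the contract the new
generic EmitVAArgInstr relies on, assuming a DefaultABIInfo target that
classifies aggregates as indirect; the new sparc-vaarg.c test below checks
exactly this shape: a pointer-typed va_arg plus a copy for the struct case,
and a direct va_arg stored through a temporary for scalars. The names
struct Big, take_last and caller are made up for illustration.

/* Illustrative only. Under the indirect convention, the caller passes a
   pointer to (a copy of) the struct; EmitVAArgInstr recovers that pointer
   with a pointer-typed va_arg and returns its address, and the callee then
   copies from it. If "byval" had ABI-visible effects in the callee, this
   pairing would break -- hence the NOTE in the new helper above. */
#include <stdarg.h>

struct Big { long a, b, c, d; };          /* hypothetical aggregate */

long take_last(int n, ...) {
  va_list ap;
  va_start(ap, n);
  struct Big v = va_arg(ap, struct Big);  /* indirect: pointer + copy */
  long r = v.a + v.d;
  va_end(ap);
  return r;
}

long caller(void) {
  struct Big b = {1, 2, 3, 4};
  return take_last(1, b);  /* on an indirect ABI, a pointer to b's copy is passed */
}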
Index: cfe/trunk/test/CodeGen/le32-vaarg.c
===================================================================
--- cfe/trunk/test/CodeGen/le32-vaarg.c
+++ cfe/trunk/test/CodeGen/le32-vaarg.c
@@ -6,7 +6,9 @@
 }
 // CHECK: define i32 @get_int
 // CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, i32{{$}}
-// CHECK: ret i32 [[RESULT]]
+// CHECK: store i32 [[RESULT]], i32* [[LOC:%[a-z_0-9]+]]
+// CHECK: [[RESULT2:%[a-z_0-9]+]] = load i32, i32* [[LOC]]
+// CHECK: ret i32 [[RESULT2]]
 
 struct Foo {
   int x;
@@ -19,7 +21,9 @@
 }
 // CHECK: define void @get_struct
 // CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, %struct.Foo{{$}}
-// CHECK: store %struct.Foo [[RESULT]], %struct.Foo* @dest
+// CHECK: store %struct.Foo [[RESULT]], %struct.Foo* [[LOC:%[a-z_0-9]+]]
+// CHECK: [[LOC2:%[a-z_0-9]+]] = bitcast {{.*}} [[LOC]] to i8*
+// CHECK: call void @llvm.memcpy{{.*}}@dest{{.*}}, i8* [[LOC2]]
 
 void skip_struct(va_list *args) {
   va_arg(*args, struct Foo);
Index: cfe/trunk/test/CodeGen/sparc-vaarg.c
===================================================================
--- cfe/trunk/test/CodeGen/sparc-vaarg.c
+++ cfe/trunk/test/CodeGen/sparc-vaarg.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -triple sparc -emit-llvm -o - %s | FileCheck %s
+#include <stdarg.h>
+
+// CHECK-LABEL: define i32 @get_int
+// CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, i32{{$}}
+// CHECK: store i32 [[RESULT]], i32* [[LOC:%[a-z_0-9]+]]
+// CHECK: [[RESULT2:%[a-z_0-9]+]] = load i32, i32* [[LOC]]
+// CHECK: ret i32 [[RESULT2]]
+int get_int(va_list *args) {
+  return va_arg(*args, int);
+}
+
+struct Foo {
+  int x;
+};
+
+struct Foo dest;
+
+// CHECK-LABEL: define void @get_struct
+// CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, %struct.Foo*{{$}}
+// CHECK: [[RESULT2:%[a-z_0-9]+]] = bitcast {{.*}} [[RESULT]] to i8*
+// CHECK: call void @llvm.memcpy{{.*}}@dest{{.*}}, i8* [[RESULT2]]
+void get_struct(va_list *args) {
+  dest = va_arg(*args, struct Foo);
+}
+
+enum E { Foo_one = 1 };
+
+enum E enum_dest;
+
+// CHECK-LABEL: define void @get_enum
+// CHECK: va_arg i8** {{.*}}, i32
+void get_enum(va_list *args) {
+  enum_dest = va_arg(*args, enum E);
+}
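[Illustrative note, not part of the patch] get_enum above only checks that the
enum is read out of the va_list as a plain i32, i.e. its integer
representation on this target. A related C-level point when writing such
tests: variadic arguments undergo the default argument promotions, so for
types narrower than int the promoted type must be named in va_arg;
read_short below is a made-up example.

/* Illustrative only. va_arg must name the promoted type of the argument
   as it was actually passed at the call site. */
#include <stdarg.h>

short read_short(va_list *args) {
  /* va_arg(*args, short) would be undefined: a short argument is
     promoted to int by the variadic call. */
  return (short)va_arg(*args, int);
}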