Index: lib/CodeGen/CGCall.cpp
===================================================================
--- lib/CodeGen/CGCall.cpp
+++ lib/CodeGen/CGCall.cpp
@@ -572,9 +572,9 @@
     expandedTypes.push_back(ConvertType(type));
 }
 
-llvm::Function::arg_iterator
-CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
-                                    llvm::Function::arg_iterator AI) {
+void CodeGenFunction::ExpandTypeFromArgs(
+    QualType Ty, LValue LV, const SmallVectorImpl<llvm::Argument *> &FnArgs,
+    SmallVectorImpl<unsigned>::const_iterator &ArgNoIter) {
   assert(LV.isSimple() &&
          "Unexpected non-simple lvalue during struct expansion.");
 
@@ -584,9 +584,11 @@
     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
       LValue LV = MakeAddrLValue(EltAddr, EltTy);
-      AI = ExpandTypeFromArgs(EltTy, LV, AI);
+      ExpandTypeFromArgs(EltTy, LV, FnArgs, ArgNoIter);
     }
-  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    return;
+  }
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
     RecordDecl *RD = RT->getDecl();
     if (RD->isUnion()) {
       // Unions can be here only in degenerative cases - all the fields are same
@@ -606,29 +608,30 @@
       if (LargestFD) {
         // FIXME: What are the right qualifiers here?
         LValue SubLV = EmitLValueForField(LV, LargestFD);
-        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
+        ExpandTypeFromArgs(LargestFD->getType(), SubLV, FnArgs, ArgNoIter);
       }
     } else {
       for (const auto *FD : RD->fields()) {
        QualType FT = FD->getType();
-
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
-       AI = ExpandTypeFromArgs(FT, SubLV, AI);
+       ExpandTypeFromArgs(FT, SubLV, FnArgs, ArgNoIter);
      }
    }
-  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+    return;
+  }
+  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
     QualType EltTy = CT->getElementType();
     llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
-    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+    auto AI = FnArgs[*(ArgNoIter++)];
+    EmitStoreThroughLValue(RValue::get(AI), MakeAddrLValue(RealAddr, EltTy));
     llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
-    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
-  } else {
-    EmitStoreThroughLValue(RValue::get(AI), LV);
-    ++AI;
+    AI = FnArgs[*(ArgNoIter++)];
+    EmitStoreThroughLValue(RValue::get(AI), MakeAddrLValue(ImagAddr, EltTy));
+    return;
   }
-
-  return AI;
+  auto AI = FnArgs[*(ArgNoIter++)];
+  EmitStoreThroughLValue(RValue::get(AI), LV);
 }
 
 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
@@ -1040,6 +1043,128 @@
   return GetFunctionType(*Info);
 }
 
+namespace {
+
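+// CallArgsToIRArgsMapping - Encapsulates, for one CGFunctionInfo, how the
+// Clang-level arguments map onto the LLVM IR arguments: the IR index of the
+// sret and inalloca arguments (if present), the IR index of each padding
+// argument, and the contiguous (possibly empty) range of IR indices backing
+// each Clang argument. ConstructAttributeList, EmitFunctionProlog and
+// EmitCall all reuse this one computation instead of re-deriving the layout
+// with ad-hoc index bookkeeping.
+//
+// Example (a sketch only; the exact mapping depends on the target ABI): for
+// "void f(struct S { float a, b; } s, int i)" where S is expanded, IR
+// arguments 0 and 1 would back Clang argument 0, IR argument 2 would back
+// Clang argument 1, and totalIRArgs() would be 3.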
+class CallArgsToIRArgsMapping {
+  const unsigned InvalidIndex = ~0U;
+  unsigned InallocaArgNo;
+  unsigned SRetArgNo;
+  unsigned TotalIRArgs;
+  SmallVector<unsigned, 4> PaddingIRArgIndex;
+  SmallVector<SmallVector<unsigned, 2>, 4> IRArgs;
+
+public:
+  CallArgsToIRArgsMapping(CodeGenModule &CGM, const CGFunctionInfo &FI)
+      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
+        PaddingIRArgIndex(FI.arg_size(), InvalidIndex), IRArgs(FI.arg_size()) {
+    construct(CGM, FI);
+  }
+
+  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
+  unsigned getInallocaArgNo() const {
+    assert(hasInallocaArg());
+    return InallocaArgNo;
+  }
+
+  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
+  unsigned getSRetArgNo() const {
+    assert(hasSRetArg());
+    return SRetArgNo;
+  }
+
+  unsigned totalIRArgs() const { return TotalIRArgs; }
+
+  bool hasPaddingArg(unsigned ArgNo) const {
+    assert(ArgNo < PaddingIRArgIndex.size());
+    return PaddingIRArgIndex[ArgNo] != InvalidIndex;
+  }
+  unsigned getPaddingArgNo(unsigned ArgNo) const {
+    assert(hasPaddingArg(ArgNo));
+    return PaddingIRArgIndex[ArgNo];
+  }
+
+  const SmallVectorImpl<unsigned> &getIRArgs(unsigned ArgNo) const {
+    assert(ArgNo < IRArgs.size());
+    return IRArgs[ArgNo];
+  }
+
+private:
+  void construct(CodeGenModule &CGM, const CGFunctionInfo &FI) {
+    unsigned IRArgNo = 0;
+    bool SwapThisWithSRet = false;
+    const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+    if (RetAI.getKind() == ABIArgInfo::Indirect) {
+      SwapThisWithSRet = RetAI.isSRetAfterThis();
+      SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
+    }
+
+    unsigned ArgNo = 0;
+    for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
+                                            E = FI.arg_end();
+         I != E; ++I, ++ArgNo) {
+      QualType ArgType = I->type;
+      const ABIArgInfo &AI = I->info;
+
+      if (AI.getPaddingType())
+        PaddingIRArgIndex[ArgNo] = IRArgNo++;
+
+      // Number of IR arguments occupied by Call argument ArgNo.
+      unsigned NumIRArgs;
+
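+      // Note: the NumIRArgs computed for each kind below must stay in sync
+      // with how EmitFunctionProlog and EmitCall actually consume IR
+      // arguments for that ABIArgInfo kind.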
+      switch (AI.getKind()) {
+      case ABIArgInfo::Extend:
+      case ABIArgInfo::Direct: {
+        // FIXME: handle sseregparm someday...
+        llvm::StructType *STy =
+            dyn_cast<llvm::StructType>(AI.getCoerceToType());
+        if (!isAAPCSVFP(FI, CGM.getTarget()) && STy) {
+          NumIRArgs = STy->getNumElements();
+        } else {
+          NumIRArgs = 1;
+        }
+        break;
+      }
+      case ABIArgInfo::Indirect:
+        NumIRArgs = 1;
+        break;
+      case ABIArgInfo::Ignore:
+      case ABIArgInfo::InAlloca:
+        // ignore and inalloca don't have matching LLVM parameters.
+        NumIRArgs = 0;
+        break;
+      case ABIArgInfo::Expand: {
+        SmallVector<llvm::Type *, 8> Types;
+        // FIXME: This is rather inefficient. Do we ever actually need to do
+        // anything here? The result should be just reconstructed on the other
+        // side, so extension should be a non-issue.
+        CGM.getTypes().GetExpandedTypes(ArgType, Types);
+        NumIRArgs = Types.size();
+        break;
+      }
+      default:
+        llvm_unreachable("Unknown ABIArgInfo kind!");
+      }
+
+      for (; NumIRArgs > 0; --NumIRArgs) {
+        IRArgs[ArgNo].push_back(IRArgNo++);
+      }
+
+      // Skip over the sret parameter when it comes second. We already
+      // handled it above.
+      if (IRArgNo == 1 && SwapThisWithSRet)
+        IRArgNo++;
+    }
+    assert(ArgNo == FI.arg_size());
+
+    if (FI.usesInAlloca())
+      InallocaArgNo = IRArgNo++;
+
+    TotalIRArgs = IRArgNo;
+  }
+};
+
+} // namespace
+
 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                            const Decl *TargetDecl,
                                            AttributeListType &PAL,
@@ -1134,9 +1259,9 @@
     FuncAttrs.addAttribute("no-realign-stack");
   }
 
+  CallArgsToIRArgsMapping IRFunctionArgs(*this, FI);
+
   QualType RetTy = FI.getReturnType();
-  unsigned Index = 1;
-  bool SwapThisWithSRet = false;
   const ABIArgInfo &RetAI = FI.getReturnInfo();
   switch (RetAI.getKind()) {
   case ABIArgInfo::Extend:
@@ -1152,25 +1277,9 @@
   case ABIArgInfo::Ignore:
     break;
 
-  case ABIArgInfo::InAlloca: {
-    // inalloca disables readnone and readonly
-    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
-        .removeAttribute(llvm::Attribute::ReadNone);
-    break;
-  }
-
+  case ABIArgInfo::InAlloca:
   case ABIArgInfo::Indirect: {
-    llvm::AttrBuilder SRETAttrs;
-    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
-    if (RetAI.getInReg())
-      SRETAttrs.addAttribute(llvm::Attribute::InReg);
-    SwapThisWithSRet = RetAI.isSRetAfterThis();
-    PAL.push_back(llvm::AttributeSet::get(
-        getLLVMContext(), SwapThisWithSRet ? 2 : Index, SRETAttrs));
-
-    if (!SwapThisWithSRet)
-      ++Index;
-
-    // sret disables readnone and readonly
+    // inalloca and sret disable readnone and readonly
     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
         .removeAttribute(llvm::Attribute::ReadNone);
     break;
   }
@@ -1189,28 +1298,45 @@
     RetAttrs.addAttribute(llvm::Attribute::NonNull);
   }
 
-  if (RetAttrs.hasAttributes())
-    PAL.push_back(llvm::
-                  AttributeSet::get(getLLVMContext(),
-                                    llvm::AttributeSet::ReturnIndex,
-                                    RetAttrs));
+  // Attach return attributes.
+  if (RetAttrs.hasAttributes()) {
+    PAL.push_back(llvm::AttributeSet::get(
+        getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
+  }
 
-  for (const auto &I : FI.arguments()) {
-    QualType ParamType = I.type;
-    const ABIArgInfo &AI = I.info;
+  // Attach attributes to sret.
+  if (IRFunctionArgs.hasSRetArg()) {
+    llvm::AttrBuilder SRETAttrs;
+    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+    if (RetAI.getInReg())
+      SRETAttrs.addAttribute(llvm::Attribute::InReg);
+    PAL.push_back(llvm::AttributeSet::get(
+        getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
+  }
+
+  // Attach attributes to inalloca argument.
+  if (IRFunctionArgs.hasInallocaArg()) {
     llvm::AttrBuilder Attrs;
+    Attrs.addAttribute(llvm::Attribute::InAlloca);
+    PAL.push_back(llvm::AttributeSet::get(
+        getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
+  }
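+
+  // LLVM attribute indices are 1-based (index 0 names the return value),
+  // which is why every IR argument number above and below gets "+ 1".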
 
-    // Skip over the sret parameter when it comes second.  We already handled it
-    // above.
-    if (Index == 2 && SwapThisWithSRet)
-      ++Index;
-
-    if (AI.getPaddingType()) {
+  unsigned ArgNo = 0;
+  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
+                                          E = FI.arg_end();
+       I != E; ++I, ++ArgNo) {
+    QualType ParamType = I->type;
+    const ABIArgInfo &AI = I->info;
+    llvm::AttrBuilder Attrs;
+
+    // Add attribute for padding argument, if necessary.
+    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
       if (AI.getPaddingInReg())
-        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
-                                              llvm::Attribute::InReg));
-      // Increment Index if there is padding.
-      ++Index;
+        PAL.push_back(llvm::AttributeSet::get(
+            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
+            llvm::Attribute::InReg));
     }
 
     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
@@ -1223,24 +1349,11 @@
       else if (ParamType->isUnsignedIntegerOrEnumerationType())
         Attrs.addAttribute(llvm::Attribute::ZExt);
       // FALL THROUGH
-    case ABIArgInfo::Direct: {
+    case ABIArgInfo::Direct:
       if (AI.getInReg())
         Attrs.addAttribute(llvm::Attribute::InReg);
-
-      // FIXME: handle sseregparm someday...
-
-      llvm::StructType *STy =
-          dyn_cast<llvm::StructType>(AI.getCoerceToType());
-      if (!isAAPCSVFP(FI, getTarget()) && STy) {
-        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
-        if (Attrs.hasAttributes())
-          for (unsigned I = 0; I < Extra; ++I)
-            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
-                                                  Attrs));
-        Index += Extra;
-      }
       break;
-    }
+
     case ABIArgInfo::Indirect:
       if (AI.getInReg())
         Attrs.addAttribute(llvm::Attribute::InReg);
@@ -1256,25 +1369,14 @@
       break;
 
     case ABIArgInfo::Ignore:
-      // Skip increment, no matching LLVM parameter.
+    case ABIArgInfo::Expand:
      continue;
 
     case ABIArgInfo::InAlloca:
       // inalloca disables readnone and readonly.
       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
           .removeAttribute(llvm::Attribute::ReadNone);
-      // Skip increment, no matching LLVM parameter.
       continue;
-
-    case ABIArgInfo::Expand: {
-      SmallVector<llvm::Type *, 8> types;
-      // FIXME: This is rather inefficient. Do we ever actually need to do
-      // anything here? The result should be just reconstructed on the other
-      // side, so extension should be a non-issue.
-      getTypes().GetExpandedTypes(ParamType, types);
-      Index += types.size();
-      continue;
-    }
     }
 
     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
@@ -1286,17 +1388,13 @@
       Attrs.addAttribute(llvm::Attribute::NonNull);
     }
 
-    if (Attrs.hasAttributes())
-      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
-    ++Index;
-  }
-
-  // Add the inalloca attribute to the trailing inalloca parameter if present.
-  if (FI.usesInAlloca()) {
-    llvm::AttrBuilder Attrs;
-    Attrs.addAttribute(llvm::Attribute::InAlloca);
-    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
+    if (Attrs.hasAttributes()) {
+      for (unsigned Index : IRFunctionArgs.getIRArgs(ArgNo))
+        PAL.push_back(
+            llvm::AttributeSet::get(getLLVMContext(), Index + 1, Attrs));
+    }
   }
+  assert(ArgNo == FI.arg_size());
 
   if (FuncAttrs.hasAttributes())
     PAL.push_back(llvm::
@@ -1344,33 +1442,29 @@
   // FIXME: We no longer need the types from FunctionArgList; lift up and
   // simplify.
 
-  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
-  llvm::Function::arg_iterator AI = Fn->arg_begin();
+  CallArgsToIRArgsMapping IRFunctionArgs(CGM, FI);
+  // Flattened function arguments.
+  SmallVector<llvm::Argument *, 16> FnArgs;
+  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
+  for (auto &Arg : Fn->args()) {
+    FnArgs.push_back(&Arg);
+  }
+  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
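+  // With the IR arguments gathered into a vector we can address them by the
+  // indices the mapping computed, instead of advancing one arg_iterator in
+  // lockstep with the Clang argument list as the old code did.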
 
   // If we're using inalloca, all the memory arguments are GEPs off of the last
   // parameter, which is a pointer to the complete memory area.
   llvm::Value *ArgStruct = nullptr;
-  if (FI.usesInAlloca()) {
-    llvm::Function::arg_iterator EI = Fn->arg_end();
-    --EI;
-    ArgStruct = EI;
+  if (IRFunctionArgs.hasInallocaArg()) {
+    ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
     assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
   }
 
-  // Name the struct return parameter, which can come first or second.
-  const ABIArgInfo &RetAI = FI.getReturnInfo();
-  bool SwapThisWithSRet = false;
-  if (RetAI.isIndirect()) {
-    SwapThisWithSRet = RetAI.isSRetAfterThis();
-    if (SwapThisWithSRet)
-      ++AI;
+  // Name the struct return parameter.
+  if (IRFunctionArgs.hasSRetArg()) {
+    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
     AI->setName("agg.result");
     AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                         llvm::Attribute::NoAlias));
-    if (SwapThisWithSRet)
-      --AI;  // Go back to the beginning for 'this'.
-    else
-      ++AI;  // Skip the sret parameter.
   }
 
   // Get the function-level nonnull attribute if it exists.
@@ -1391,9 +1485,9 @@
   // we can push the cleanups in the correct order for the ABI.
   assert(FI.arg_size() == Args.size() &&
          "Mismatch between function signature & arguments.");
-  unsigned ArgNo = 1;
+  unsigned ArgNo = 0;
   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
-  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 
+  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
        i != e; ++i, ++info_it, ++ArgNo) {
     const VarDecl *Arg = *i;
     QualType Ty = info_it->type;
@@ -1402,20 +1496,20 @@
     bool isPromoted =
       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
 
-    // Skip the dummy padding argument.
-    if (ArgI.getPaddingType())
-      ++AI;
+    const auto &IRArgNo = IRFunctionArgs.getIRArgs(ArgNo);
 
     switch (ArgI.getKind()) {
     case ABIArgInfo::InAlloca: {
+      assert(IRArgNo.size() == 0);
       llvm::Value *V = Builder.CreateStructGEP(
           ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
       ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
-      continue;  // Don't increment AI!
+      break;
     }
 
     case ABIArgInfo::Indirect: {
-      llvm::Value *V = AI;
+      assert(IRArgNo.size() == 1);
+      llvm::Value *V = FnArgs[IRArgNo[0]];
 
       if (!hasScalarEvaluationKind(Ty)) {
         // Aggregates and complex variables are accessed by reference.  All we
@@ -1461,7 +1555,8 @@
       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
-        assert(AI != Fn->arg_end() && "Argument mismatch!");
+        assert(IRArgNo.size() == 1);
+        auto AI = FnArgs[IRArgNo[0]];
        llvm::Value *V = AI;
 
        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
@@ -1574,11 +1669,12 @@
 
         if (SrcSize <= DstSize) {
           Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+          assert(STy->getNumElements() == IRArgNo.size());
           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-            assert(AI != Fn->arg_end() && "Argument mismatch!");
+            auto AI = FnArgs[IRArgNo[i]];
             AI->setName(Arg->getName() + ".coerce" + Twine(i));
             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
-            Builder.CreateStore(AI++, EltPtr);
+            Builder.CreateStore(AI, EltPtr);
           }
         } else {
           llvm::AllocaInst *TempAlloca =
@@ -1586,20 +1682,22 @@
           TempAlloca->setAlignment(AlignmentToUse);
           llvm::Value *TempV = TempAlloca;
 
+          assert(STy->getNumElements() == IRArgNo.size());
           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-            assert(AI != Fn->arg_end() && "Argument mismatch!");
+            auto AI = FnArgs[IRArgNo[i]];
             AI->setName(Arg->getName() + ".coerce" + Twine(i));
             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
-            Builder.CreateStore(AI++, EltPtr);
+            Builder.CreateStore(AI, EltPtr);
           }
 
           Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
         }
       } else {
         // Simple case, just do a coerced store of the argument into the alloca.
-        assert(AI != Fn->arg_end() && "Argument mismatch!");
+        assert(IRArgNo.size() == 1);
+        auto AI = FnArgs[IRArgNo[0]];
         AI->setName(Arg->getName() + ".coerce");
-        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
       }
 
 
@@ -1612,7 +1710,7 @@
       } else {
         ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
       }
-      continue;  // Skip ++AI increment, already done.
+      break;
     }
 
     case ABIArgInfo::Expand: {
@@ -1623,17 +1721,20 @@
       CharUnits Align = getContext().getDeclAlign(Arg);
       Alloca->setAlignment(Align.getQuantity());
       LValue LV = MakeAddrLValue(Alloca, Ty, Align);
-      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
       ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
 
-      // Name the arguments used in expansion and increment AI.
-      unsigned Index = 0;
-      for (; AI != End; ++AI, ++Index)
-        AI->setName(Arg->getName() + "." + Twine(Index));
-      continue;
+      auto IRArgNoIter = IRArgNo.begin();
+      ExpandTypeFromArgs(Ty, LV, FnArgs, IRArgNoIter);
+      assert(IRArgNoIter == IRArgNo.end());
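+      // ExpandTypeFromArgs consumes one IR index per leaf value it stores;
+      // the assert above checks that expansion used exactly the IR
+      // arguments reserved for this parameter.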
+      for (unsigned i = 0, e = IRArgNo.size(); i != e; ++i) {
+        auto AI = FnArgs[IRArgNo[i]];
+        AI->setName(Arg->getName() + "." + Twine(i));
+      }
+      break;
     }
 
     case ABIArgInfo::Ignore:
+      assert(IRArgNo.size() == 0);
       // Initialize the local variable appropriately.
       if (!hasScalarEvaluationKind(Ty)) {
         ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
@@ -1641,21 +1742,10 @@
       } else {
         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
         ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
       }
-
-      // Skip increment, no matching LLVM parameter.
-      continue;
+      break;
     }
-
-    ++AI;
-
-    if (ArgNo == 1 && SwapThisWithSRet)
-      ++AI;  // Skip the sret parameter.
   }
 
-  if (FI.usesInAlloca())
-    ++AI;
-  assert(AI == Fn->arg_end() && "Argument mismatch!");
-
   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
     for (int I = Args.size() - 1; I >= 0; --I)
       EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
@@ -2556,18 +2646,10 @@
   return Inst;
 }
 
-static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
-                            llvm::FunctionType *FTy) {
-  if (ArgNo < FTy->getNumParams())
-    assert(Elt->getType() == FTy->getParamType(ArgNo));
-  else
-    assert(FTy->isVarArg());
-  ++ArgNo;
-}
-
-void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
-                                       SmallVectorImpl<llvm::Value *> &Args,
-                                       llvm::FunctionType *IRFuncTy) {
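+// ExpandTypeToArgs - Store the expanded leaves of an aggregate RValue into
+// the IRCallArgs slots named by IRArgNoIter, advancing the iterator past
+// each slot that is filled.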
+void CodeGenFunction::ExpandTypeToArgs(
+    QualType Ty, RValue RV, SmallVectorImpl<llvm::Value *> &IRCallArgs,
+    SmallVectorImpl<unsigned>::const_iterator &IRArgNoIter,
+    llvm::FunctionType *IRFuncTy) {
   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
     unsigned NumElts = AT->getSize().getZExtValue();
     QualType EltTy = AT->getElementType();
@@ -2575,7 +2657,7 @@
     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
       RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
-      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
+      ExpandTypeToArgs(EltTy, EltRV, IRCallArgs, IRArgNoIter, IRFuncTy);
     }
   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
     RecordDecl *RD = RT->getDecl();
@@ -2597,29 +2679,32 @@
       }
       if (LargestFD) {
         RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
-        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
+        ExpandTypeToArgs(LargestFD->getType(), FldRV, IRCallArgs, IRArgNoIter,
+                         IRFuncTy);
       }
     } else {
       for (const auto *FD : RD->fields()) {
         RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
-        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
+        ExpandTypeToArgs(FD->getType(), FldRV, IRCallArgs, IRArgNoIter,
+                         IRFuncTy);
       }
     }
   } else if (Ty->isAnyComplexType()) {
     ComplexPairTy CV = RV.getComplexVal();
-    Args.push_back(CV.first);
-    Args.push_back(CV.second);
+    IRCallArgs[*(IRArgNoIter++)] = CV.first;
+    IRCallArgs[*(IRArgNoIter++)] = CV.second;
   } else {
     assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");
 
     // Insert a bitcast as needed.
     llvm::Value *V = RV.getScalarVal();
-    if (Args.size() < IRFuncTy->getNumParams() &&
-        V->getType() != IRFuncTy->getParamType(Args.size()))
-      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
+    unsigned IRArgPos = *(IRArgNoIter++);
+    if (IRArgPos < IRFuncTy->getNumParams() &&
+        V->getType() != IRFuncTy->getParamType(IRArgPos))
+      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgPos));
 
-    Args.push_back(V);
+    IRCallArgs[IRArgPos] = V;
   }
 }
 
@@ -2645,15 +2730,12 @@
                                  const Decl *TargetDecl,
                                  llvm::Instruction **callOrInvoke) {
   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
-  SmallVector<llvm::Value *, 16> Args;
 
   // Handle struct-return functions by passing a pointer to the
   // location that we would like to return into.
   QualType RetTy = CallInfo.getReturnType();
   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
 
-  // IRArgNo - Keep track of the argument number in the callee we're looking at.
-  unsigned IRArgNo = 0;
   llvm::FunctionType *IRFuncTy =
       cast<llvm::FunctionType>(
           cast<llvm::PointerType>(Callee->getType())->getElementType());
@@ -2675,22 +2757,18 @@
     ArgMemory = AI;
   }
 
+  CallArgsToIRArgsMapping IRFunctionArgs(CGM, CallInfo);
+  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
+
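+  // IRCallArgs is allocated up front and filled positionally: each case
+  // below writes into the slot(s) the mapping assigned to its argument, so
+  // the final argument order no longer depends on emission order (which is
+  // what previously forced the sret/this swap at the end).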
   // If the call returns a temporary with struct return, create a temporary
   // alloca to hold the result, unless one is given to us.
   llvm::Value *SRetPtr = nullptr;
-  bool SwapThisWithSRet = false;
   if (RetAI.isIndirect() || RetAI.isInAlloca()) {
     SRetPtr = ReturnValue.getValue();
     if (!SRetPtr)
       SRetPtr = CreateMemTemp(RetTy);
-    if (RetAI.isIndirect()) {
-      Args.push_back(SRetPtr);
-      SwapThisWithSRet = RetAI.isSRetAfterThis();
-      if (SwapThisWithSRet)
-        IRArgNo = 1;
-      checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
-      if (SwapThisWithSRet)
-        IRArgNo = 0;
+    if (IRFunctionArgs.hasSRetArg()) {
+      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
     } else {
       llvm::Value *Addr =
           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
@@ -2700,26 +2778,25 @@
   assert(CallInfo.arg_size() == CallArgs.size() &&
          "Mismatch between function signature & arguments.");
 
+  unsigned ArgNo = 0;
   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
-       I != E; ++I, ++info_it) {
+       I != E; ++I, ++info_it, ++ArgNo) {
     const ABIArgInfo &ArgInfo = info_it->info;
     RValue RV = I->RV;
 
-    // Skip 'sret' if it came second.
-    if (IRArgNo == 1 && SwapThisWithSRet)
-      ++IRArgNo;
-
     CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
 
     // Insert a padding argument to ensure proper alignment.
-    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
-      Args.push_back(llvm::UndefValue::get(PaddingType));
-      ++IRArgNo;
-    }
+    if (IRFunctionArgs.hasPaddingArg(ArgNo))
+      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+          llvm::UndefValue::get(ArgInfo.getPaddingType());
+
+    const auto &IRArgNo = IRFunctionArgs.getIRArgs(ArgNo);
 
     switch (ArgInfo.getKind()) {
     case ABIArgInfo::InAlloca: {
+      assert(IRArgNo.size() == 0);
       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
       if (RV.isAggregate()) {
         // Replace the placeholder with the appropriate argument slot GEP.
@@ -2745,22 +2822,21 @@
         LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
         EmitInitStoreOfNonAggregate(*this, RV, argLV);
       }
-      break; // Don't increment IRArgNo!
+      break;
     }
 
     case ABIArgInfo::Indirect: {
+      assert(IRArgNo.size() == 1);
+      unsigned IRArgPos = IRArgNo[0];
       if (RV.isScalar() || RV.isComplex()) {
         // Make a temporary alloca to pass the argument.
         llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
         if (ArgInfo.getIndirectAlign() > AI->getAlignment())
           AI->setAlignment(ArgInfo.getIndirectAlign());
-        Args.push_back(AI);
+        IRCallArgs[IRArgPos] = AI;
 
-        LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
+        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
         EmitInitStoreOfNonAggregate(*this, RV, argLV);
-
-        // Validate argument match.
-        checkArgMatches(AI, IRArgNo, IRFuncTy);
       } else {
         // We want to avoid creating an unnecessary temporary+copy here;
         // however, we need one in three cases:
@@ -2774,8 +2850,8 @@
         unsigned Align = ArgInfo.getIndirectAlign();
         const llvm::DataLayout *TD = &CGM.getDataLayout();
         const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
-        const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
-          IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
+        const unsigned ArgAddrSpace = (IRArgPos < IRFuncTy->getNumParams() ?
+          IRFuncTy->getParamType(IRArgPos)->getPointerAddressSpace() : 0);
         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
             (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
              llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
             (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
           // Create an aligned temporary, and copy to it.
           llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
           if (Align > AI->getAlignment())
             AI->setAlignment(Align);
-          Args.push_back(AI);
+          IRCallArgs[IRArgPos] = AI;
           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
-
-          // Validate argument match.
-          checkArgMatches(AI, IRArgNo, IRFuncTy);
         } else {
           // Skip the extra memcpy call.
-          Args.push_back(Addr);
-
-          // Validate argument match.
-          checkArgMatches(Addr, IRArgNo, IRFuncTy);
+          IRCallArgs[IRArgPos] = Addr;
         }
       }
       break;
     }
 
     case ABIArgInfo::Ignore:
+      assert(IRArgNo.size() == 0);
       break;
 
     case ABIArgInfo::Extend:
@@ -2808,20 +2879,20 @@
       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
+        assert(IRArgNo.size() == 1);
+        unsigned IRArgPos = IRArgNo[0];
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());
-
+
        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
-        if (IRArgNo < IRFuncTy->getNumParams() &&
-            V->getType() != IRFuncTy->getParamType(IRArgNo))
-          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
-        Args.push_back(V);
-
-        checkArgMatches(V, IRArgNo, IRFuncTy);
+        if (IRArgPos < IRFuncTy->getNumParams() &&
+            V->getType() != IRFuncTy->getParamType(IRArgPos))
+          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgPos));
+        IRCallArgs[IRArgPos] = V;
 
         break;
       }
@@ -2870,38 +2941,32 @@
               llvm::PointerType::getUnqual(STy));
         }
 
+        assert(IRArgNo.size() == STy->getNumElements());
         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
           // We don't know what we're loading from.
           LI->setAlignment(1);
-          Args.push_back(LI);
-
-          // Validate argument match.
-          checkArgMatches(LI, IRArgNo, IRFuncTy);
+          IRCallArgs[IRArgNo[i]] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
-        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
-                                         *this));
-
-        // Validate argument match.
-        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
+        assert(IRArgNo.size() == 1);
+        unsigned IRArgPos = IRArgNo[0];
+        IRCallArgs[IRArgPos] =
+            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
      }
 
      break;
    }
 
    case ABIArgInfo::Expand:
-      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
-      IRArgNo = Args.size();
+      auto IRArgNoIter = IRArgNo.begin();
+      ExpandTypeToArgs(I->Ty, RV, IRCallArgs, IRArgNoIter, IRFuncTy);
      break;
    }
  }
 
-  if (SwapThisWithSRet)
-    std::swap(Args[0], Args[1]);
-
  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
@@ -2932,7 +2997,8 @@
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
-    Args.push_back(Arg);
+    assert(IRFunctionArgs.hasInallocaArg());
+    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }
 
  if (!CallArgs.getCleanupsToDeactivate().empty())
@@ -2951,7 +3017,7 @@
      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
-          ActualFT->getNumParams() == Args.size() &&
+          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
@@ -2968,6 +3034,16 @@
    }
  }
 
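+  // One consolidated type check replaces the old per-argument
+  // checkArgMatches calls: every non-inalloca IR argument must already have
+  // the type the callee expects.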
+  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
+  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+    // Inalloca argument can have a different type.
+    if (IRFunctionArgs.hasInallocaArg() &&
+        i == IRFunctionArgs.getInallocaArgNo())
+      continue;
+    if (i < IRFuncTy->getNumParams())
+      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
+  }
+
   unsigned CallingConv;
   CodeGen::AttributeListType AttributeList;
   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
@@ -2982,10 +3058,10 @@
 
   llvm::CallSite CS;
   if (!InvokeDest) {
-    CS = Builder.CreateCall(Callee, Args);
+    CS = Builder.CreateCall(Callee, IRCallArgs);
   } else {
     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
-    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
+    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
     EmitBlock(Cont);
   }
   if (callOrInvoke)