Index: llvm/docs/LangRef.rst
===================================================================
--- llvm/docs/LangRef.rst
+++ llvm/docs/LangRef.rst
@@ -1014,7 +1014,7 @@
     opposed to memory, though some targets use it to distinguish between
     two different kinds of registers). Use of this attribute is
     target-specific.
-``byval``
+``byval`` or ``byval(<ty>)``
     This indicates that the pointer parameter should really be passed by
     value to the function.  The attribute implies that a hidden copy of the
     pointee is made between the caller and the callee, so the callee
@@ -1026,6 +1026,9 @@
     ``byval`` parameters). This is not a valid attribute for return
     values.
 
+    The byval attribute also supports an optional type argument, which must
+    be the same as the pointee type of the argument.
+
     The byval attribute also supports specifying an alignment with the
     align attribute.  It indicates the alignment of the stack slot to
     form and the known alignment of the pointer specified to the call
Index: llvm/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetLowering.h
+++ llvm/include/llvm/CodeGen/TargetLowering.h
@@ -188,6 +188,7 @@
     bool IsSwiftSelf : 1;
     bool IsSwiftError : 1;
     uint16_t Alignment = 0;
+    Type *ByValType = nullptr;
 
     ArgListEntry()
         : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
Index: llvm/include/llvm/IR/Argument.h
===================================================================
--- llvm/include/llvm/IR/Argument.h
+++ llvm/include/llvm/IR/Argument.h
@@ -78,6 +78,9 @@
   /// If this is a byval or inalloca argument, return its alignment.
   unsigned getParamAlignment() const;
 
+  /// If this is a byval argument, return its byval type.
+  Type *getParamByValType() const;
+
   /// Return true if this argument has the nest attribute.
   bool hasNestAttr() const;
 
Index: llvm/include/llvm/IR/Attributes.h
===================================================================
--- llvm/include/llvm/IR/Attributes.h
+++ llvm/include/llvm/IR/Attributes.h
@@ -90,6 +90,7 @@
   static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val = 0);
   static Attribute get(LLVMContext &Context, StringRef Kind,
                        StringRef Val = StringRef());
+  static Attribute get(LLVMContext &Context, AttrKind Kind, Type *Ty);
 
   /// Return a uniquified Attribute object that has the specific
   /// alignment set.
@@ -102,6 +103,7 @@
   static Attribute getWithAllocSizeArgs(LLVMContext &Context,
                                         unsigned ElemSizeArg,
                                         const Optional<unsigned> &NumElemsArg);
+  static Attribute getWithByValType(LLVMContext &Context, Type *Ty);
 
   //===--------------------------------------------------------------------===//
   // Attribute Accessors
@@ -117,6 +119,9 @@
   /// attribute.
   bool isStringAttribute() const;
 
+  /// Return true if the attribute is a type attribute.
+  bool isTypeAttribute() const;
+
   /// Return true if the attribute is present.
   bool hasAttribute(AttrKind Val) const;
 
@@ -139,6 +144,10 @@
   /// attribute to be a string attribute.
   StringRef getValueAsString() const;
 
+  /// Return the attribute's value as a Type. This requires the attribute to be
+  /// a type attribute.
+  Type *getValueAsType() const;
+
   /// Returns the alignment field of an attribute as a byte alignment
   /// value.
unsigned getAlignment() const; @@ -279,6 +288,7 @@ unsigned getStackAlignment() const; uint64_t getDereferenceableBytes() const; uint64_t getDereferenceableOrNullBytes() const; + Type *getByValType() const; std::pair> getAllocSizeArgs() const; std::string getAsString(bool InAttrGrp = false) const; @@ -598,6 +608,9 @@ /// Return the alignment for the specified function parameter. unsigned getParamAlignment(unsigned ArgNo) const; + /// Return the byval type for the specified function parameter. + Type *getParamByValType(unsigned ArgNo) const; + /// Get the stack alignment. unsigned getStackAlignment(unsigned Index) const; @@ -697,6 +710,7 @@ uint64_t DerefBytes = 0; uint64_t DerefOrNullBytes = 0; uint64_t AllocSizeArgs = 0; + Type *ByValType = nullptr; public: AttrBuilder() = default; @@ -772,6 +786,9 @@ /// dereferenceable_or_null attribute exists (zero is returned otherwise). uint64_t getDereferenceableOrNullBytes() const { return DerefOrNullBytes; } + /// Retrieve the byval type. + Type *getByValType() const { return ByValType; } + /// Retrieve the allocsize args, if the allocsize attribute exists. If it /// doesn't exist, pair(0, 0) is returned. std::pair> getAllocSizeArgs() const; @@ -796,6 +813,9 @@ AttrBuilder &addAllocSizeAttr(unsigned ElemSizeArg, const Optional &NumElemsArg); + /// This turns a byval type into the form used internally in Attribute. + AttrBuilder &addByValAttr(Type *Ty); + /// Add an allocsize attribute, using the representation returned by /// Attribute.getIntValue(). AttrBuilder &addAllocSizeAttrFromRawRepr(uint64_t RawAllocSizeRepr); Index: llvm/include/llvm/IR/CallSite.h =================================================================== --- llvm/include/llvm/IR/CallSite.h +++ llvm/include/llvm/IR/CallSite.h @@ -415,6 +415,11 @@ CALLSITE_DELEGATE_GETTER(getParamAlignment(ArgNo)); } + /// Extract the byval type for a call or parameter (nullptr=unknown). + Type *getParamByValType(unsigned ArgNo) const { + CALLSITE_DELEGATE_GETTER(getParamByValType(ArgNo)); + } + /// Extract the number of dereferenceable bytes for a call or parameter /// (0=unknown). uint64_t getDereferenceableBytes(unsigned i) const { Index: llvm/include/llvm/IR/Function.h =================================================================== --- llvm/include/llvm/IR/Function.h +++ llvm/include/llvm/IR/Function.h @@ -431,6 +431,11 @@ return AttributeSets.getParamAlignment(ArgNo); } + /// Extract the byval type for a parameter (nullptr=unknown). + Type *getParamByValType(unsigned ArgNo) const { + return AttributeSets.getParamByValType(ArgNo); + } + /// Extract the number of dereferenceable bytes for a call or /// parameter (0=unknown). /// @param i AttributeList index, referring to a return value or argument. Index: llvm/include/llvm/IR/InstrTypes.h =================================================================== --- llvm/include/llvm/IR/InstrTypes.h +++ llvm/include/llvm/IR/InstrTypes.h @@ -1551,6 +1551,11 @@ return Attrs.getParamAlignment(ArgNo); } + /// Extract the byval type for a call or parameter (nullptr=unknown). + Type *getParamByValType(unsigned ArgNo) const { + return Attrs.getParamByValType(ArgNo); + } + /// Extract the number of dereferenceable bytes for a call or /// parameter (0=unknown). 
uint64_t getDereferenceableBytes(unsigned i) const { Index: llvm/lib/AsmParser/LLParser.h =================================================================== --- llvm/lib/AsmParser/LLParser.h +++ llvm/lib/AsmParser/LLParser.h @@ -339,6 +339,7 @@ bool ParseFnAttributeValuePairs(AttrBuilder &B, std::vector &FwdRefAttrGrps, bool inAttrGrp, LocTy &BuiltinLoc); + bool ParseByValWithOptionalType(Type *&Result); // Module Summary Index Parsing. bool SkipModuleSummaryEntry(); Index: llvm/lib/AsmParser/LLParser.cpp =================================================================== --- llvm/lib/AsmParser/LLParser.cpp +++ llvm/lib/AsmParser/LLParser.cpp @@ -1578,7 +1578,13 @@ B.addAlignmentAttr(Alignment); continue; } - case lltok::kw_byval: B.addAttribute(Attribute::ByVal); break; + case lltok::kw_byval: { + Type *Ty; + if (ParseByValWithOptionalType(Ty)) + return true; + B.addByValAttr(Ty); + continue; + } case lltok::kw_dereferenceable: { uint64_t Bytes; if (ParseOptionalDerefAttrBytes(lltok::kw_dereferenceable, Bytes)) @@ -2431,6 +2437,22 @@ return false; } +/// ParseByValWithOptionalType +/// ::= byval +/// ::= byval() +bool LLParser::ParseByValWithOptionalType(Type *&Result) { + Result = nullptr; + if (!EatIfPresent(lltok::kw_byval)) + return true; + if (!EatIfPresent(lltok::lparen)) + return false; + if (ParseType(Result)) + return true; + if (!EatIfPresent(lltok::rparen)) + return Error(Lex.getLoc(), "expected ')'"); + return false; +} + /// ParseOptionalOperandBundles /// ::= /*empty*/ /// ::= '[' OperandBundle [, OperandBundle ]* ']' Index: llvm/lib/Bitcode/Reader/BitcodeReader.cpp =================================================================== --- llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -638,6 +638,10 @@ return getFnValueByID(ValNo, Ty); } + /// Upgrades old-style typeless byval attributes by adding the corresponding + /// argument's pointee type. + void propagateByValTypes(CallBase *CB); + /// Converts alignment exponent (i.e. power of two (or zero)) to the /// corresponding alignment to use. If alignment is too large, returns /// a corresponding error code. @@ -1492,6 +1496,12 @@ if (Error Err = parseAttrKind(Record[++i], &Kind)) return Err; + // Upgrade old-style byval attribute to one with a type, even if it's + // nullptr. We will have to insert the real type when we associate + // this AttributeList with a function. + if (Kind == Attribute::ByVal) + B.addByValAttr(nullptr); + B.addAttribute(Kind); } else if (Record[i] == 1) { // Integer attribute Attribute::AttrKind Kind; @@ -1507,9 +1517,7 @@ B.addDereferenceableOrNullAttr(Record[++i]); else if (Kind == Attribute::AllocSize) B.addAllocSizeAttrFromRawRepr(Record[++i]); - } else { // String attribute - assert((Record[i] == 3 || Record[i] == 4) && - "Invalid attribute group entry"); + } else if (Record[i] == 3 || Record[i] == 4) { // String attribute bool HasValue = (Record[i++] == 4); SmallString<64> KindStr; SmallString<64> ValStr; @@ -1527,6 +1535,15 @@ } B.addAttribute(KindStr.str(), ValStr.str()); + } else { + assert((Record[i] == 5 || Record[i] == 6) && + "Invalid attribute group entry"); + bool HasType = Record[i] == 6; + Attribute::AttrKind Kind; + if (Error Err = parseAttrKind(Record[++i], &Kind)) + return Err; + if (Kind == Attribute::ByVal) + B.addByValAttr(HasType ? 
getTypeByID(Record[++i]) : nullptr); } } @@ -3021,6 +3038,17 @@ Func->setLinkage(getDecodedLinkage(RawLinkage)); Func->setAttributes(getAttributes(Record[4])); + // Upgrade any old-style byval without a type by propagating the argument's + // pointee type. There should be no opaque pointers where the byval type is + // implicit. + for (auto &Arg : Func->args()) { + if (Arg.hasByValAttr() && !Arg.getParamByValType()) { + Arg.removeAttr(Attribute::ByVal); + Arg.addAttr(Attribute::getWithByValType( + Context, Arg.getType()->getPointerElementType())); + } + } + unsigned Alignment; if (Error Err = parseAlignmentValue(Record[5], Alignment)) return Err; @@ -3421,6 +3449,19 @@ return Error::success(); } +void BitcodeReader::propagateByValTypes(CallBase *CB) { + for (unsigned i = 0; i < CB->getNumArgOperands(); ++i) { + if (CB->paramHasAttr(i, Attribute::ByVal) && + !CB->getAttribute(i, Attribute::ByVal).getValueAsType()) { + CB->removeParamAttr(i, Attribute::ByVal); + CB->addParamAttr( + i, Attribute::getWithByValType( + Context, + CB->getArgOperand(i)->getType()->getPointerElementType())); + } + } +} + /// Lazily parse the specified function body block. Error BitcodeReader::parseFunctionBody(Function *F) { if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID)) @@ -4236,6 +4277,8 @@ cast(I)->setCallingConv( static_cast(CallingConv::MaxID & CCInfo)); cast(I)->setAttributes(PAL); + propagateByValTypes(cast(I)); + break; } case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval] @@ -4711,6 +4754,7 @@ TCK = CallInst::TCK_NoTail; cast(I)->setTailCallKind(TCK); cast(I)->setAttributes(PAL); + propagateByValTypes(cast(I)); if (FMF.any()) { if (!isa(I)) return error("Fast-math-flags specified for call without " Index: llvm/lib/Bitcode/Writer/BitcodeWriter.cpp =================================================================== --- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -747,7 +747,7 @@ Record.push_back(1); Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum())); Record.push_back(Attr.getValueAsInt()); - } else { + } else if (Attr.isStringAttribute()) { StringRef Kind = Attr.getKindAsString(); StringRef Val = Attr.getValueAsString(); @@ -758,6 +758,13 @@ Record.append(Val.begin(), Val.end()); Record.push_back(0); } + } else { + assert(Attr.isTypeAttribute()); + Type *Ty = Attr.getValueAsType(); + Record.push_back(Ty ? 6 : 5); + Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum())); + if (Ty) + Record.push_back(VE.getTypeID(Attr.getValueAsType())); } } @@ -4114,15 +4121,15 @@ // Emit blockinfo, which defines the standard abbreviations etc. writeBlockInfo(); + // Emit information describing all of the types in the module. + writeTypeTable(); + // Emit information about attribute groups. writeAttributeGroupTable(); // Emit information about parameter attributes. writeAttributeTable(); - // Emit information describing all of the types in the module. - writeTypeTable(); - writeComdats(); // Emit top-level description of module, including target triple, inline asm, Index: llvm/lib/Bitcode/Writer/ValueEnumerator.cpp =================================================================== --- llvm/lib/Bitcode/Writer/ValueEnumerator.cpp +++ llvm/lib/Bitcode/Writer/ValueEnumerator.cpp @@ -949,9 +949,11 @@ incorporateFunctionMetadata(F); // Adding function arguments to the value table. 
-  for (const auto &I : F.args())
+  for (const auto &I : F.args()) {
     EnumerateValue(&I);
-
+    if (I.hasAttribute(Attribute::ByVal) && I.getParamByValType())
+      EnumerateType(I.getParamByValType());
+  }
   FirstFuncConstantID = Values.size();
 
   // Add all function-level constants to the value table.
Index: llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -87,7 +87,10 @@
   if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
     Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
-    Arg.Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
+
+    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
+    Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
+
     // For ByVal, alignment should be passed from FE. BE will guess if
     // this info is not there but there are cases it cannot get right.
     unsigned FrameAlign;
Index: llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1204,9 +1204,11 @@
   if (Arg.IsByVal || Arg.IsInAlloca) {
     PointerType *Ty = cast<PointerType>(Arg.Ty);
     Type *ElementTy = Ty->getElementType();
-    unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
-    // For ByVal, alignment should come from FE. BE will guess if this info is
-    // not there, but there are cases it cannot get right.
+    unsigned FrameSize =
+        DL.getTypeAllocSize(Arg.ByValType ? Arg.ByValType : ElementTy);
+
+    // For ByVal, alignment should come from FE. BE will guess if this info
+    // is not there, but there are cases it cannot get right.
     unsigned FrameAlign = Arg.Alignment;
     if (!FrameAlign)
       FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -9072,8 +9072,11 @@
     if (Args[i].IsByVal || Args[i].IsInAlloca) {
       PointerType *Ty = cast<PointerType>(Args[i].Ty);
       Type *ElementTy = Ty->getElementType();
-      Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
-      // For ByVal, alignment should come from FE. BE will guess if this
+
+      unsigned FrameSize = DL.getTypeAllocSize(
+          Args[i].ByValType ? Args[i].ByValType : ElementTy);
+      Flags.setByValSize(FrameSize);
+
       // info is not there but there are cases it cannot get right.
unsigned FrameAlign; if (Args[i].Alignment) @@ -9570,9 +9573,14 @@ if (Flags.isByVal() || Flags.isInAlloca()) { PointerType *Ty = cast(Arg.getType()); Type *ElementTy = Ty->getElementType(); - Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); - // For ByVal, alignment should be passed from FE. BE will guess if - // this info is not there but there are cases it cannot get right. + + // For ByVal, size and alignment should be passed from FE. BE will + // guess if this info is not there but there are cases it cannot get + // right. + unsigned FrameSize = DL.getTypeAllocSize( + Arg.getParamByValType() ? Arg.getParamByValType() : ElementTy); + Flags.setByValSize(FrameSize); + unsigned FrameAlign; if (Arg.getParamAlignment()) FrameAlign = Arg.getParamAlignment(); Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -112,6 +112,7 @@ IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf); IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError); Alignment = Call->getParamAlignment(ArgIdx); + ByValType = Call->getParamByValType(ArgIdx); } /// Generate a libcall taking the given operands as arguments and returning a Index: llvm/lib/IR/AttributeImpl.h =================================================================== --- llvm/lib/IR/AttributeImpl.h +++ llvm/lib/IR/AttributeImpl.h @@ -29,6 +29,7 @@ namespace llvm { class LLVMContext; +class Type; //===----------------------------------------------------------------------===// /// \class @@ -41,7 +42,8 @@ enum AttrEntryKind { EnumAttrEntry, IntAttrEntry, - StringAttrEntry + StringAttrEntry, + TypeAttrEntry, }; AttributeImpl(AttrEntryKind KindID) : KindID(KindID) {} @@ -56,6 +58,7 @@ bool isEnumAttribute() const { return KindID == EnumAttrEntry; } bool isIntAttribute() const { return KindID == IntAttrEntry; } bool isStringAttribute() const { return KindID == StringAttrEntry; } + bool isTypeAttribute() const { return KindID == TypeAttrEntry; } bool hasAttribute(Attribute::AttrKind A) const; bool hasAttribute(StringRef Kind) const; @@ -66,16 +69,20 @@ StringRef getKindAsString() const; StringRef getValueAsString() const; + Type *getValueAsType() const; + /// Used when sorting the attributes. 
bool operator<(const AttributeImpl &AI) const; void Profile(FoldingSetNodeID &ID) const { if (isEnumAttribute()) - Profile(ID, getKindAsEnum(), 0); + Profile(ID, getKindAsEnum(), static_cast(0)); else if (isIntAttribute()) Profile(ID, getKindAsEnum(), getValueAsInt()); - else + else if (isStringAttribute()) Profile(ID, getKindAsString(), getValueAsString()); + else + Profile(ID, getKindAsEnum(), getValueAsType()); } static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind, @@ -88,6 +95,12 @@ ID.AddString(Kind); if (!Values.empty()) ID.AddString(Values); } + + static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind, + Type *Ty) { + ID.AddInteger(Kind); + ID.AddPointer(Ty); + } }; //===----------------------------------------------------------------------===// @@ -145,6 +158,18 @@ StringRef getStringValue() const { return Val; } }; +class TypeAttributeImpl : public EnumAttributeImpl { + virtual void anchor(); + + Type *Ty; + +public: + TypeAttributeImpl(Attribute::AttrKind Kind, Type *Ty) + : EnumAttributeImpl(TypeAttrEntry, Kind), Ty(Ty) {} + + Type *getTypeValue() const { return Ty; } +}; + //===----------------------------------------------------------------------===// /// \class /// This class represents a group of attributes that apply to one @@ -189,6 +214,7 @@ uint64_t getDereferenceableOrNullBytes() const; std::pair> getAllocSizeArgs() const; std::string getAsString(bool InAttrGrp) const; + Type *getByValType() const; using iterator = const Attribute *; Index: llvm/lib/IR/Attributes.cpp =================================================================== --- llvm/lib/IR/Attributes.cpp +++ llvm/lib/IR/Attributes.cpp @@ -121,6 +121,27 @@ return Attribute(PA); } +Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind, + Type *Ty) { + LLVMContextImpl *pImpl = Context.pImpl; + FoldingSetNodeID ID; + ID.AddInteger(Kind); + ID.AddPointer(Ty); + + void *InsertPoint; + AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint); + + if (!PA) { + // If we didn't find any existing attributes of the same shape then create a + // new one and insert it. + PA = new TypeAttributeImpl(Kind, Ty); + pImpl->AttrsSet.InsertNode(PA, InsertPoint); + } + + // Return the Attribute that we found or created. 
+ return Attribute(PA); +} + Attribute Attribute::getWithAlignment(LLVMContext &Context, uint64_t Align) { assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); assert(Align <= 0x40000000 && "Alignment too large."); @@ -146,6 +167,10 @@ return get(Context, DereferenceableOrNull, Bytes); } +Attribute Attribute::getWithByValType(LLVMContext &Context, Type *Ty) { + return get(Context, ByVal, Ty); +} + Attribute Attribute::getWithAllocSizeArgs(LLVMContext &Context, unsigned ElemSizeArg, const Optional &NumElemsArg) { @@ -170,9 +195,13 @@ return pImpl && pImpl->isStringAttribute(); } +bool Attribute::isTypeAttribute() const { + return pImpl && pImpl->isTypeAttribute(); +} + Attribute::AttrKind Attribute::getKindAsEnum() const { if (!pImpl) return None; - assert((isEnumAttribute() || isIntAttribute()) && + assert((isEnumAttribute() || isIntAttribute() || isTypeAttribute()) && "Invalid attribute type to get the kind as an enum!"); return pImpl->getKindAsEnum(); } @@ -198,6 +227,14 @@ return pImpl->getValueAsString(); } +Type *Attribute::getValueAsType() const { + if (!pImpl) return {}; + assert(isTypeAttribute() && + "Invalid attribute type to get the value as a type!"); + return pImpl->getValueAsType(); +} + + bool Attribute::hasAttribute(AttrKind Kind) const { return (pImpl && pImpl->hasAttribute(Kind)) || (!pImpl && Kind == None); } @@ -252,8 +289,6 @@ return "argmemonly"; if (hasAttribute(Attribute::Builtin)) return "builtin"; - if (hasAttribute(Attribute::ByVal)) - return "byval"; if (hasAttribute(Attribute::Convergent)) return "convergent"; if (hasAttribute(Attribute::SwiftError)) @@ -353,6 +388,20 @@ if (hasAttribute(Attribute::ImmArg)) return "immarg"; + if (hasAttribute(Attribute::ByVal)) { + std::string Result; + Result += "byval"; + Type *Ty = getValueAsType(); + if (Ty) { + raw_string_ostream OS(Result); + Result += '('; + Ty->print(OS, false, true); + OS.flush(); + Result += ')'; + } + return Result; + } + // FIXME: These should be output like this: // // align=4 @@ -451,6 +500,8 @@ void StringAttributeImpl::anchor() {} +void TypeAttributeImpl::anchor() {} + bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const { if (isStringAttribute()) return false; return getKindAsEnum() == A; @@ -462,7 +513,7 @@ } Attribute::AttrKind AttributeImpl::getKindAsEnum() const { - assert(isEnumAttribute() || isIntAttribute()); + assert(isEnumAttribute() || isIntAttribute() || isTypeAttribute()); return static_cast(this)->getEnumKind(); } @@ -481,6 +532,11 @@ return static_cast(this)->getStringValue(); } +Type *AttributeImpl::getValueAsType() const { + assert(isTypeAttribute()); + return static_cast(this)->getTypeValue(); +} + bool AttributeImpl::operator<(const AttributeImpl &AI) const { // This sorts the attributes with Attribute::AttrKinds coming first (sorted // relative to their enum value) and then strings. 
@@ -488,10 +544,23 @@ if (AI.isEnumAttribute()) return getKindAsEnum() < AI.getKindAsEnum(); if (AI.isIntAttribute()) return true; if (AI.isStringAttribute()) return true; + if (AI.isTypeAttribute()) return true; + } + + if (isTypeAttribute()) { + if (AI.isEnumAttribute()) return false; + if (AI.isTypeAttribute()) { + if (getKindAsEnum() == AI.getKindAsEnum()) + return getValueAsType() < AI.getValueAsType(); + return getKindAsEnum() < AI.getKindAsEnum(); + } + if (AI.isIntAttribute()) return true; + if (AI.isStringAttribute()) return true; } if (isIntAttribute()) { if (AI.isEnumAttribute()) return false; + if (AI.isTypeAttribute()) return false; if (AI.isIntAttribute()) { if (getKindAsEnum() == AI.getKindAsEnum()) return getValueAsInt() < AI.getValueAsInt(); @@ -500,7 +569,9 @@ if (AI.isStringAttribute()) return true; } + assert(isStringAttribute()); if (AI.isEnumAttribute()) return false; + if (AI.isTypeAttribute()) return false; if (AI.isIntAttribute()) return false; if (getKindAsString() == AI.getKindAsString()) return getValueAsString() < AI.getValueAsString(); @@ -608,6 +679,10 @@ return SetNode ? SetNode->getDereferenceableOrNullBytes() : 0; } +Type *AttributeSet::getByValType() const { + return SetNode ? SetNode->getByValType() : nullptr; +} + std::pair> AttributeSet::getAllocSizeArgs() const { return SetNode ? SetNode->getAllocSizeArgs() : std::pair>(0, 0); @@ -691,6 +766,9 @@ Attribute Attr; switch (Kind) { + case Attribute::ByVal: + Attr = Attribute::getWithByValType(C, B.getByValType()); + break; case Attribute::Alignment: Attr = Attribute::getWithAlignment(C, B.getAlignment()); break; @@ -760,6 +838,13 @@ return 0; } +Type *AttributeSetNode::getByValType() const { + for (const auto I : *this) + if (I.hasAttribute(Attribute::ByVal)) + return I.getValueAsType(); + return 0; +} + uint64_t AttributeSetNode::getDereferenceableBytes() const { for (const auto I : *this) if (I.hasAttribute(Attribute::Dereferenceable)) @@ -1258,6 +1343,11 @@ return getAttributes(ArgNo + FirstArgIndex).getAlignment(); } +Type *AttributeList::getParamByValType(unsigned Index) const { + return getAttributes(Index+FirstArgIndex).getByValType(); +} + + unsigned AttributeList::getStackAlignment(unsigned Index) const { return getAttributes(Index).getStackAlignment(); } @@ -1336,6 +1426,7 @@ TargetDepAttrs.clear(); Alignment = StackAlignment = DerefBytes = DerefOrNullBytes = 0; AllocSizeArgs = 0; + ByValType = nullptr; } AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Val) { @@ -1360,6 +1451,8 @@ Alignment = Attr.getAlignment(); else if (Kind == Attribute::StackAlignment) StackAlignment = Attr.getStackAlignment(); + else if (Kind == Attribute::ByVal) + ByValType = Attr.getValueAsType(); else if (Kind == Attribute::Dereferenceable) DerefBytes = Attr.getDereferenceableBytes(); else if (Kind == Attribute::DereferenceableOrNull) @@ -1382,6 +1475,8 @@ Alignment = 0; else if (Val == Attribute::StackAlignment) StackAlignment = 0; + else if (Val == Attribute::ByVal) + ByValType = nullptr; else if (Val == Attribute::Dereferenceable) DerefBytes = 0; else if (Val == Attribute::DereferenceableOrNull) @@ -1464,6 +1559,12 @@ return *this; } +AttrBuilder &AttrBuilder::addByValAttr(Type *Ty) { + Attrs[Attribute::ByVal] = true; + ByValType = Ty; + return *this; +} + AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) { // FIXME: What if both have alignments, but they don't match?! 
   if (!Alignment)
Index: llvm/lib/IR/Function.cpp
===================================================================
--- llvm/lib/IR/Function.cpp
+++ llvm/lib/IR/Function.cpp
@@ -113,6 +113,11 @@
   return getParent()->getParamAlignment(getArgNo());
 }
 
+Type *Argument::getParamByValType() const {
+  assert(getType()->isPointerTy() && "Only pointers have byval types");
+  return getParent()->getParamByValType(getArgNo());
+}
+
 uint64_t Argument::getDereferenceableBytes() const {
   assert(getType()->isPointerTy() &&
          "Only pointers have dereferenceable bytes");
Index: llvm/lib/IR/Verifier.cpp
===================================================================
--- llvm/lib/IR/Verifier.cpp
+++ llvm/lib/IR/Verifier.cpp
@@ -1629,6 +1629,11 @@
          "'noinline and alwaysinline' are incompatible!",
          V);
 
+  if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
+    Assert(Attrs.getByValType() == cast<PointerType>(Ty)->getElementType(),
+           "Attribute 'byval' type does not match parameter!");
+  }
+
   AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
   Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
          "Wrong types for attribute: " +
Index: llvm/test/Assembler/byval-type-attr.ll
===================================================================
--- /dev/null
+++ llvm/test/Assembler/byval-type-attr.ll
@@ -0,0 +1,31 @@
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
+
+; CHECK: define void @foo(i32* byval(i32) align 4)
+define void @foo(i32* byval(i32) align 4) {
+  ret void
+}
+
+; CHECK: define void @bar({ i32*, i8 }* byval({ i32*, i8 }) align 4)
+define void @bar({i32*, i8}* byval({i32*, i8}) align 4) {
+  ret void
+}
+
+define void @caller({ i32*, i8 }* %ptr) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK: call void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
+; CHECK: invoke void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
+  call void @bar({i32*, i8}* byval %ptr)
+  invoke void @bar({i32*, i8}* byval %ptr) to label %success unwind label %fail
+
+success:
+  ret void
+
+fail:
+  landingpad { i8*, i32 } cleanup
+  ret void
+}
+
+; CHECK: declare void @baz([8 x i8]* byval([8 x i8]))
+%named_type = type [8 x i8]
+declare void @baz(%named_type* byval(%named_type))
+
+declare i32 @__gxx_personality_v0(...)
Index: llvm/test/Assembler/invalid-byval-type1.ll
===================================================================
--- /dev/null
+++ llvm/test/Assembler/invalid-byval-type1.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: Attribute 'byval' type does not match parameter!
+declare void @foo(i32* byval(i8))
Index: llvm/test/Assembler/invalid-byval-type2.ll
===================================================================
--- /dev/null
+++ llvm/test/Assembler/invalid-byval-type2.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: error: void type only allowed for function results
+declare void @foo(i32* byval(void))
Index: llvm/test/Assembler/invalid-byval-type3.ll
===================================================================
--- /dev/null
+++ llvm/test/Assembler/invalid-byval-type3.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: Attributes 'byval' and 'inalloca' do not support unsized types!
+declare void @foo(void()* byval(void()))
Index: llvm/test/Assembler/invalid-size1.ll
===================================================================
--- /dev/null
+++ llvm/test/Assembler/invalid-size1.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: error: expected function name
+declare i8* size 4 @ret_size()
Index: llvm/test/Bitcode/attributes-3.3.ll
===================================================================
--- llvm/test/Bitcode/attributes-3.3.ll
+++ llvm/test/Bitcode/attributes-3.3.ll
@@ -48,7 +48,7 @@
 }
 
 define void @f8(i8* byval)
-; CHECK: define void @f8(i8* byval)
+; CHECK: define void @f8(i8* byval(i8))
 {
         ret void;
 }
Index: llvm/test/Bitcode/attributes.ll
===================================================================
--- llvm/test/Bitcode/attributes.ll
+++ llvm/test/Bitcode/attributes.ll
@@ -45,7 +45,7 @@
 }
 
 define void @f8(i8* byval)
-; CHECK: define void @f8(i8* byval)
+; CHECK: define void @f8(i8* byval(i8))
 {
         ret void;
 }
Index: llvm/test/Bitcode/byval-upgrade.test
===================================================================
--- /dev/null
+++ llvm/test/Bitcode/byval-upgrade.test
@@ -0,0 +1,7 @@
+RUN: llvm-dis %p/Inputs/byval-upgrade.bc -o - | FileCheck %s
+
+Make sure we upgrade old-style IntAttribute byval records to a fully typed
+version correctly.
+ +CHECK: call void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr) +CHECK: invoke void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr) Index: llvm/test/Bitcode/compatibility-3.6.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.6.ll +++ llvm/test/Bitcode/compatibility-3.6.ll @@ -404,7 +404,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility-3.7.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.7.ll +++ llvm/test/Bitcode/compatibility-3.7.ll @@ -410,7 +410,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility-3.8.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.8.ll +++ llvm/test/Bitcode/compatibility-3.8.ll @@ -435,7 +435,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility-3.9.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.9.ll +++ llvm/test/Bitcode/compatibility-3.9.ll @@ -504,7 +504,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility-4.0.ll =================================================================== --- llvm/test/Bitcode/compatibility-4.0.ll +++ llvm/test/Bitcode/compatibility-4.0.ll @@ -504,7 +504,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility-5.0.ll =================================================================== --- llvm/test/Bitcode/compatibility-5.0.ll +++ llvm/test/Bitcode/compatibility-5.0.ll @@ -508,7 +508,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; 
CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility-6.0.ll =================================================================== --- llvm/test/Bitcode/compatibility-6.0.ll +++ llvm/test/Bitcode/compatibility-6.0.ll @@ -515,7 +515,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: llvm/test/Bitcode/compatibility.ll =================================================================== --- llvm/test/Bitcode/compatibility.ll +++ llvm/test/Bitcode/compatibility.ll @@ -517,7 +517,7 @@ declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) @@ -1713,6 +1713,15 @@ declare void @llvm.test.immarg.intrinsic(i32 immarg) ; CHECK: declare void @llvm.test.immarg.intrinsic(i32 immarg) +; byval attribute with type +%named_type = type [8 x i8] +declare void @byval_type(i32* byval(i32) align 2) +declare void @byval_type2({ i8, i8* }* byval({ i8, i8* })) +declare void @byval_named_type(%named_type* byval(%named_type)) +; CHECK: declare void @byval_type(i32* byval(i32) align 2) +; CHECK: declare void @byval_type2({ i8, i8* }* byval({ i8, i8* })) +; CHECK: declare void @byval_named_type([8 x i8]* byval([8 x i8])) + ; CHECK: attributes #0 = { alignstack=4 } ; CHECK: attributes #1 = { alignstack=8 } ; CHECK: attributes #2 = { alwaysinline } Index: llvm/test/Bitcode/highLevelStructure.3.2.ll =================================================================== --- llvm/test/Bitcode/highLevelStructure.3.2.ll +++ llvm/test/Bitcode/highLevelStructure.3.2.ll @@ -41,7 +41,7 @@ declare void @ParamAttr4(i8 signext) ; CHECK: declare void @ParamAttr5(i8* inreg) declare void @ParamAttr5(i8* inreg) -; CHECK: declare void @ParamAttr6(i8* byval) +; CHECK: declare void @ParamAttr6(i8* byval(i8)) declare void @ParamAttr6(i8* byval) ; CHECK: declare void @ParamAttr7(i8* noalias) declare void @ParamAttr7(i8* noalias) @@ -51,7 +51,7 @@ declare void @ParamAttr9(i8* nest noalias nocapture) ; CHECK: declare void @ParamAttr10{{[(i8* sret noalias nocapture) | (i8* noalias nocapture sret)]}} declare void @ParamAttr10(i8* sret noalias nocapture) -;CHECK: declare void @ParamAttr11{{[(i8* byval noalias nocapture) | (i8* noalias nocapture byval)]}} +;CHECK: declare void @ParamAttr11{{[(i8* byval(i8) noalias nocapture) | (i8* noalias nocapture byval(i8))]}} declare void @ParamAttr11(i8* byval noalias nocapture) ;CHECK: declare void @ParamAttr12{{[(i8* inreg noalias nocapture) | (i8* noalias nocapture inreg)]}} declare void @ParamAttr12(i8* inreg noalias nocapture) Index: llvm/test/CodeGen/AArch64/byval-type.ll =================================================================== --- /dev/null +++ 
llvm/test/CodeGen/AArch64/byval-type.ll @@ -0,0 +1,37 @@ +; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s + +define i8 @byval_match(i8* byval(i8) align 1, i8* byval %ptr) { +; CHECK-LABEL: byval_match: +; CHECK: ldrb w0, [sp, #8] + %res = load i8, i8* %ptr + ret i8 %res +} + +define void @caller_match(i8* %p0, i8* %p1) { +; CHECK-LABEL: caller_match: +; CHECK: ldrb [[P1:w[0-9]+]], [x1] +; CHECK: strb [[P1]], [sp, #8] +; CHECK: ldrb [[P0:w[0-9]+]], [x0] +; CHECK: strb [[P0]], [sp] +; CHECK: bl byval_match + call i8 @byval_match(i8* byval(i8) align 1 %p0, i8* byval %p1) + ret void +} + +define i8 @byval_large([3 x i64]* byval([3 x i64]) align 8, i8* byval %ptr) { +; CHECK-LABEL: byval_large: +; CHECK: ldrb w0, [sp, #24] + %res = load i8, i8* %ptr + ret i8 %res +} + +define void @caller_large([3 x i64]* %p0, i8* %p1) { +; CHECK-LABEL: caller_large: +; CHECK: ldr [[P0HI:x[0-9]+]], [x0, #16] +; CHECK: ldr [[P0LO:q[0-9]+]], [x0] +; CHECK: str [[P0HI]], [sp, #16] +; CHECK: str [[P0LO]], [sp] +; CHECK: bl byval_large + call i8 @byval_large([3 x i64]* byval([3 x i64]) align 8 %p0, i8* byval %p1) + ret void +} Index: llvm/test/Transforms/Inline/byval-tail-call.ll =================================================================== --- llvm/test/Transforms/Inline/byval-tail-call.ll +++ llvm/test/Transforms/Inline/byval-tail-call.ll @@ -56,7 +56,7 @@ ; CHECK: %[[POS:.*]] = alloca i32 ; CHECK: %[[VAL:.*]] = load i32, i32* %x ; CHECK: store i32 %[[VAL]], i32* %[[POS]] -; CHECK: tail call void @ext2(i32* byval nonnull %[[POS]] +; CHECK: tail call void @ext2(i32* nonnull byval %[[POS]] ; CHECK: ret void tail call void @bar2(i32* byval %x) ret void @@ -67,7 +67,7 @@ ; CHECK: %[[POS:.*]] = alloca i32 ; CHECK: %[[VAL:.*]] = load i32, i32* %x ; CHECK: store i32 %[[VAL]], i32* %[[POS]] -; CHECK: tail call void @ext2(i32* byval nonnull %[[POS]] +; CHECK: tail call void @ext2(i32* nonnull byval %[[POS]] ; CHECK: ret void %x = alloca i32 tail call void @bar2(i32* byval %x) Index: llvm/unittests/IR/AttributesTest.cpp =================================================================== --- llvm/unittests/IR/AttributesTest.cpp +++ llvm/unittests/IR/AttributesTest.cpp @@ -8,6 +8,7 @@ #include "llvm/IR/Attributes.h" #include "llvm/IR/LLVMContext.h" +#include "llvm/IR/DerivedTypes.h" #include "gtest/gtest.h" using namespace llvm; @@ -166,4 +167,19 @@ EXPECT_EQ(2U, AL.getNumAttrSets()); } +TEST(Attributes, StringRepresentation) { + LLVMContext C; + StructType *Ty = StructType::create(Type::getInt32Ty(C), "mystruct"); + + // Insufficiently careful printing can result in byval(%mystruct = { i32 }) + Attribute A = Attribute::getWithByValType(C, Ty); + EXPECT_EQ(A.getAsString(), "byval(%mystruct)"); + + A = Attribute::getWithByValType(C, nullptr); + EXPECT_EQ(A.getAsString(), "byval"); + + A = Attribute::getWithByValType(C, Type::getInt32Ty(C)); + EXPECT_EQ(A.getAsString(), "byval(i32)"); +} + } // end anonymous namespace
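
For reference, a minimal IR sketch of the surface syntax this patch introduces (the function and type names below are illustrative, not taken from the patch): the typeless spelling of byval stays valid, while an explicit type operand must equal the parameter's pointee type or the verifier rejects it; old bitcode is auto-upgraded to the typed form.

  %illustrative.pair = type { i32, i8 }

  ; Old spelling: still accepted by the assembler; bitcode from older
  ; releases is upgraded to byval(%illustrative.pair) on load.
  declare void @old_style(%illustrative.pair* byval)

  ; New spelling: the type operand must match the pointee type, otherwise the
  ; verifier reports "Attribute 'byval' type does not match parameter!".
  declare void @new_style(%illustrative.pair* byval(%illustrative.pair) align 4)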